| prompt | completion | api |
| --- | --- | --- |
| string, length 19 to 1.03M | string, length 4 to 2.12k | string, length 8 to 90 |
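Each row below is a raw prompt; the matching completion and its fully qualified api value appear inline at the point where the prompt ends. As a minimal sketch of how a dump with this schema could be inspected (the file name and JSON-lines format are assumptions, not something the header above specifies):

import pandas as pd

# Assumed file name and format; adjust to however the dump is actually stored.
rows = pd.read_json("pandas_api_completions.jsonl", lines=True)

print(rows.columns.tolist())              # ['prompt', 'completion', 'api']
print(rows["api"].value_counts().head())  # most frequently targeted pandas APIs
print(rows.iloc[0]["prompt"][-200:])      # tail of one prompt, i.e. the completion context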
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pathlib
import pandas as pd
import requests
from google.cloud import storage
def main(
source_url_json: str,
source_file: pathlib.Path,
target_file: pathlib.Path,
chunksize: str,
target_gcs_bucket: str,
target_gcs_path: str,
) -> None:
logging.info("San Francisco - Bikeshare Status process started")
pathlib.Path("./files").mkdir(parents=True, exist_ok=True)
logging.info(f"Extracting URL for status: {source_url_json}")
source_file_status_json = str(source_file).replace(".csv", "") + "_status.json"
logging.info(f"Downloading states json file {source_url_json}")
download_file_json(source_url_json, source_file_status_json, source_file)
chunksz = int(chunksize)
logging.info(f"Opening batch file {source_file}")
with pd.read_csv(
source_file, # path to main source file to load in batches
engine="python",
encoding="utf-8",
quotechar='"', # string separator, typically double-quotes
chunksize=chunksz, # size of batch data, in no. of records
sep=",", # data column separator, typically ","
) as reader:
for chunk_number, chunk in enumerate(reader):
target_file_batch = str(target_file).replace(
".csv", "-" + str(chunk_number) + ".csv"
)
df = pd.DataFrame()  # api: pandas.DataFrame
# Routine that preprocesses and transforms the data for time series
# <NAME>
# <NAME>
# ------------------------------------------------------------------
# Input: 2 or more asynchronous .csv files.
# Output: hdf5 binary file with synchronized data chunks
#
# Each csv file must have a time column whose sampling may be
# arbitrary and asynchronous with respect to the other files.
# There may be gaps in the data.
#
# The input parameter is a json file of the form:
# config/sma_uai_alldata.json
# This file holds information about the input/output files,
# the columns and the conversion variables.
#
# The program takes the files and looks for continuous blocks of data
# in which no column has a gap larger than
# "max_delta_between_blocks_sec" seconds; if a gap is smaller than
# that, the data are interpolated.
# Each continuous block of data, or "chunk", must last at least
# "min_chunk_duration_sec" seconds or it is discarded.
# The data are then synchronized onto a uniform sampling grid
# given by "sample_spacing".
#
# As a result, multiple chunks of continuous data are written to the
# hdf5 file. If "min_chunk_duration_sec" is too large and only one or
# very few chunks are produced, that is inconvenient, because building
# the train/testing set requires enough chunks to assign to each set,
# i.e. 20 equal chunks imply a train/testing split with an error of at
# least 5% with respect to the defined ratio.
import os
import sys
import json
import pandas
import numpy
# Looks for continuous blocks of time in which no time gap larger
# than maxStep occurs and the block lasts at least minDuration.
def get_cont_chunks(df, date_col, maxStep, minDuration):
timeArray = df[date_col]
# indexes where a discontinuity in time occurs
# note: this only works with numpy 1.20+
idxs = numpy.where(numpy.diff(timeArray) > maxStep)[0]
if len(idxs) == 0:
# no discontinuity found: treat the entire array as a single contiguous block,
# returned as an IntervalArray so the caller can use .overlaps() on it
return pandas.arrays.IntervalArray([pandas.Interval(timeArray.iloc[0], timeArray.iloc[-1])])
print("found {} discontinuities".format(len(idxs)))
leftIdx = 0
rightIdx = -1
interval_list = list()
for idx in idxs:
rightIdx = idx
duration = timeArray[rightIdx] - timeArray[leftIdx]
if duration > minDuration:
interv = pandas.Interval(timeArray[leftIdx], timeArray[rightIdx])
interval_list.append(interv)
leftIdx = rightIdx + 1
intervals = pandas.arrays.IntervalArray(interval_list)
return intervals
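# Illustrative usage sketch for get_cont_chunks, with synthetic timestamps; every
# value below is an assumption chosen only for demonstration.
#
#   demo_times = pandas.to_datetime(
#       ["2020-01-01 00:00", "2020-01-01 00:01", "2020-01-01 00:02",
#        "2020-01-01 02:00", "2020-01-01 02:01", "2020-01-01 02:03"])
#   demo_df = pandas.DataFrame({"master_datetime": demo_times})
#   demo_intervals = get_cont_chunks(
#       demo_df, "master_datetime",
#       maxStep=pandas.Timedelta(5, unit="min"),      # split where a gap exceeds 5 minutes
#       minDuration=pandas.Timedelta(1, unit="min"))  # keep blocks lasting over 1 minute
#   # -> one Interval covering 00:00-00:02; note that, as written, the loop above does
#   #    not emit the trailing block that follows the last discontinuity.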
if __name__ == '__main__':
# Read the json file with the data configuration
config_path = sys.argv[1]
# Same loading routines used throughout the code.
config_file = open(config_path, 'r')
ds_config = json.loads(config_file.read())
data_config = ds_config['dataset']
config_file.close()
chunk_config = data_config.get("chunk_config")
training_config = data_config.get("training_config")
source_configs = data_config.get("data_sources")
hdf5_path = os.path.join(chunk_config['path_to_hdf5'], chunk_config['tag'])
sample_spacing_min = chunk_config['sample_spacing']
min_chunk_duration_sec = chunk_config["min_chunk_duration_sec"]
max_sync_block_dt_sec = chunk_config["max_delta_between_blocks_sec"]
min_date = pandas.to_datetime(chunk_config.get('min_date', "1900-01-01T00:00:00"))
max_date = pandas.to_datetime(chunk_config.get('max_date', "2100-12-31T00:00:00"))
print("deleting ",hdf5_path)
try:
os.remove(hdf5_path)
print("file deleted")
except:
print("file does not exist")
dfs = list()
cont_date_intervals = list()
for source_name, source_info in source_configs.items():
print("[INFO] loading source {}".format(source_name))
fpath = source_info['file']
field_list = source_info['field_list']
cos_sin_fields = source_info.get('cos_sin_fields', None)
date_col = source_info.get('date_time_column_name', 'Date time')
date_fmt = source_info.get('date_time_column_format', "%Y-%m-%dT%H:%M:%S")
data = pandas.read_csv(fpath)
data = data.dropna()
data = data.reset_index(drop=True)
data['master_datetime'] = pandas.to_datetime(data[date_col], format=date_fmt)
data = data.drop([date_col], axis=1)
# Generate cos and sin versions of the configured fields (e.g. the hour of day).
if cos_sin_fields is not None:
for func, pname in zip([numpy.cos, numpy.sin], ["Cosine", "Sine"]):
for fname in cos_sin_fields:
new_fname = pname + " " + fname
field_data = data[fname]
if 'deg' in fname:
field_data = field_data*numpy.pi/180.0
data[new_fname] = func(field_data)  # apply numpy.cos / numpy.sin respectively
# Generate the continuous chunks of data.
chunks = get_cont_chunks(
data, 'master_datetime',
pandas.Timedelta(max_sync_block_dt_sec, unit='s'),
pandas.Timedelta(min_chunk_duration_sec, unit='s'))
dfs.append(data)
cont_date_intervals.append(chunks)
print(chunks)
# Synchronize and write each chunk
hdfs = pandas.HDFStore(hdf5_path)
ik = 0
n_samples = 0
data_overlaps = list()
intervals_i = cont_date_intervals[0]
for inter_i in intervals_i:
n_source_overlaps = 0
interval_overlaps = list()
# check overlap
for intervals_j in cont_date_intervals[1::]:
source_overlaps = list()
overlap_mask = intervals_j.overlaps(inter_i)
overlap_inter = intervals_j[overlap_mask]
if len(overlap_inter) > 0:
n_source_overlaps += 1
# find overlaps
for overlap in overlap_inter:
o_left = max(inter_i.left, overlap.left)
o_right = min(inter_i.right, overlap.right)
source_overlaps.append(pandas.Interval(o_left, o_right, closed='neither')) #closed='neither' orig code
interval_overlaps.append(source_overlaps)
interval_overlaps.append(source_overlaps)
# only keep intervals that overlap every other source
if n_source_overlaps != len(source_configs.keys()) - 1:
continue
# build the dataframe with all the matching columns
resample_ok = True
all_slices = list()
for src_idx, src_os in enumerate(interval_overlaps):
df = dfs[src_idx]
src_o = src_os[0]
odf = df[(df['master_datetime'] >= src_o.left) & (df['master_datetime'] <= src_o.right)]  # select the chunk
odf = odf.set_index(pandas.DatetimeIndex(odf['master_datetime']))
odf = odf.drop(['master_datetime'], axis=1)
odf = odf.dropna()
try:
df_slice = odf.resample(sample_spacing_min, closed=None).bfill()
all_slices.append(df_slice)
except:
resample_ok = False
print("fail")
break
if resample_ok:
synced_df = pandas.concat(all_slices, axis=1, join='inner')
if len(synced_df) > 1:
# cos/sin day
datetime = synced_df.index.to_series()
if datetime.iloc[0] < min_date:
continue
if datetime.iloc[-1] > max_date:
continue
print(datetime.iloc[0], "to", datetime.iloc[-1])
sec_day = (datetime - datetime.dt.normalize()) / pandas.Timedelta(seconds=1)  # api: pandas.Timedelta
import pandas as pd
from sklearn.model_selection import train_test_split
pd.options.mode.chained_assignment = None
def data_preprocessing():
df = pd.read_csv("data/SCADA_data.csv.gz")
status_data_wec = pd.read_csv("data/status_data_wec.csv")
df["Inverter avg. temp"] = df[
[
"CS101 : Sys 1 inverter 1 cabinet temp.",
"CS101 : Sys 1 inverter 2 cabinet temp.",
"CS101 : Sys 1 inverter 3 cabinet temp.",
"CS101 : Sys 1 inverter 4 cabinet temp.",
"CS101 : Sys 1 inverter 5 cabinet temp.",
"CS101 : Sys 1 inverter 6 cabinet temp.",
"CS101 : Sys 1 inverter 7 cabinet temp.",
"CS101 : Sys 2 inverter 1 cabinet temp.",
"CS101 : Sys 2 inverter 2 cabinet temp.",
"CS101 : Sys 2 inverter 3 cabinet temp.",
"CS101 : Sys 2 inverter 4 cabinet temp.",
]
].mean(axis=1)
df["Inverter std. temp"] = df[
[
"CS101 : Sys 1 inverter 1 cabinet temp.",
"CS101 : Sys 1 inverter 2 cabinet temp.",
"CS101 : Sys 1 inverter 3 cabinet temp.",
"CS101 : Sys 1 inverter 4 cabinet temp.",
"CS101 : Sys 1 inverter 5 cabinet temp.",
"CS101 : Sys 1 inverter 6 cabinet temp.",
"CS101 : Sys 1 inverter 7 cabinet temp.",
"CS101 : Sys 2 inverter 1 cabinet temp.",
"CS101 : Sys 2 inverter 2 cabinet temp.",
"CS101 : Sys 2 inverter 3 cabinet temp.",
"CS101 : Sys 2 inverter 4 cabinet temp.",
]
].std(axis=1)
df["Time"] = pd.to_datetime(df["Time"], dayfirst=True, errors="coerce")
df.sort_values(by="Time", axis=0, inplace=True)
df.reset_index(drop=True, inplace=True)
af_corr_time_wec_s = status_data_wec.loc[
(status_data_wec["Main Status"] == 62)
| (status_data_wec["Main Status"] == 80)
| (status_data_wec["Main Status"] == 228)
| (status_data_wec["Main Status"] == 60)
| (status_data_wec["Main Status"] == 9),
"Time",
]
af_corr_time_wec_s = pd.to_datetime(af_corr_time_wec_s)  # api: pandas.to_datetime
from neurospyke.cell import Cell
import glob
import os
import pandas as pd
import pickle
cache_dir = 'cached_data/'
cell_cache_dir = cache_dir + 'cells/'
query_cache_dir = cache_dir + 'queries/'
os.makedirs(query_cache_dir, exist_ok=True)
os.makedirs(cell_cache_dir, exist_ok=True)
def calc_pickle_cell_path(mat_cell_path):
# Cell files have unique names
mat_name = os.path.basename(os.path.normpath(mat_cell_path))
pickle_name = mat_name.replace('.mat', '.pickle')
return cell_cache_dir + pickle_name
def load_cell(mat_cell_path, to_pickle=True):
pickle_cell_path = calc_pickle_cell_path(mat_cell_path)
try:
with open(pickle_cell_path, 'rb') as f:
cell = pickle.load(f)
return cell
except:
cell = Cell(mat_cell_path)
if to_pickle:
with open(pickle_cell_path, 'wb') as f:
pickle.dump(cell, f)
return cell
def load_cells(data_dir_path, to_pickle=True):
paths = glob.glob(data_dir_path)
cells = [load_cell(path, to_pickle) for path in paths]
assert len(cells)>0, f"no cells were found in {data_dir_path}"
return cells
def concat_dfs_by_index(df1, df2):
cols_to_use = df2.columns.difference(df1.columns)
combined_df = pd.concat([df1, df2[cols_to_use]], axis=1, join_axes=[df1.index])  # api: pandas.concat
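# Note: the join_axes argument was deprecated in pandas 0.25 and removed in 1.0; on
# newer pandas the same effect can typically be obtained by reindexing, e.g.
# pd.concat([df1, df2[cols_to_use]], axis=1).reindex(df1.index).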
# import cv2
import os,glob
import os
import sys
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import seaborn as sn
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
cwd = os.getcwd()
print(cwd)
print(ROOT_DIR)
modelFile = "mask_rcnn_ship_0499.h5"
current_dir = "./test"
modelfile_list = modelFile.split("_")
dirs = os.listdir(current_dir)
mainFolder ="./Test_Sonuçları/"
# os.mkdir(mainFolder)
mapScoresFile = "mapScores.csv"
confMatrixFile = "confMatrixCounts"
confMatrixNormalizedFile= "confMatrixNormalized"
confMatrixImageFileNorm = mainFolder +"confMatrixImageNorm.png"
confMatrixImageFile = mainFolder+ "confMatrixImage.png"
import ShipClassification
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
config = ShipClassification.ShipClassificationConfig()
# Override the training configurations with a few
# changes for inferencing.
class InferenceConfig(config.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE =0.3
NUM_CLASSES = 1 + 12
BACKBONE ="resnet101"
config = InferenceConfig()
config.display()
# Device to load the neural network on.
# Useful if you're training a model on the same
# machine, in which case use CPU and leave the
# GPU for training.
DEVICE = "/gpu:0" # /cpu:0 or /gpu:0
# Inspect the model in training or inference modes
# values: 'inference' or 'training'
# TODO: code for 'training' test mode not ready yet
TEST_MODE = "inference"
def get_ax(rows=1, cols=1, size=16):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Adjust the size attribute to control how big to render images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
# Create model in inference mode
print(MODEL_DIR)
with tf.device(DEVICE):
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
config=config)
import numpy as np
import skimage.draw
classNames = {}
classNames[1] = "Balıkçı"
classNames[2] = "Askeri Tip-1"
classNames[3] = "Askeri Tip-2"
classNames[4] = "Askeri Tip-3"
classNames[5] = "Tanker"
classNames[6] = "Destek_Gemisi"
classNames[7] = "Askeri Tip-4"
classNames[8] = "Askeri Tip-5"
classNames[9] = "Askeri Tip-6"
classNames[10] = "Askeri Tip-7"
classNames[11] = "Konteyner"
classNames[12] = "Askeri Tip-8"
def get_colors_for_class_ids(class_ids):
colors = []
for class_id in class_ids:
if (class_id == 1):
colors.append((.904, .204, .204))
elif (class_id == 2):
colors.append((.164, .196, .0))
elif (class_id == 3):
colors.append((.250, .104, .0))
elif (class_id == 4):
colors.append((.122, .59, .63))
elif (class_id == 5):
colors.append((.229, .20, .0))
elif (class_id == 6):
colors.append((.27, .61, .226))
elif (class_id == 7):
colors.append((.0, .0, .0))
elif (class_id == 8):
colors.append((.130, .90, .44))
elif (class_id == 9):
colors.append((.229, .20, .0))
elif (class_id == 10):
colors.append((.0, .71, .169))
elif (class_id == 11):
colors.append((.96, .169, .23))
elif (class_id == 12):
colors.append((.0, .71, .169))
return colors
WEIGHTS_PATH = modelFile # TODO: update this path
weights_path = WEIGHTS_PATH
# Load weights
print("Loading weights ", weights_path)
with tf.device(DEVICE):
model.load_weights(weights_path, by_name=True)
listOfValues=[]
"""
Folders and names are written.
"""
labels = {}
classes =[]
f = open("labels.txt", "r")
index=0
for x in f:
labels.update({x.replace("\n",""):index})
index=index+1
classes.append(x.replace("\n",""))
def plot_confusion_matrix(frmt,title,filename,cm, classes,
normalize=False,
cmap=plt.cm.Blues):
plt.figure(figsize=(10, 10),dpi=500)
sn.set(font_scale=1)
cm_df_decisionRatesWOConfMetrics = pd.DataFrame(cm, index=classes, columns=classes)
p = sn.heatmap(cm_df_decisionRatesWOConfMetrics, annot=True, cmap='Blues', annot_kws={"size":10}, linewidths=0.01, cbar=False, fmt=frmt)
p.set_xticklabels(p.get_xticklabels(),rotation=45,va="top", ha="right")
# plt.text(12, -0.8, title, fontsize=10, color='Black', fontstyle='italic', va="bottom", ha="center")
plt.tight_layout()
plt.savefig(filename)
return plt
conf_matrix = np.zeros((12,12))
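# Layout: rows are indexed by the true class (folder number - 1) and columns by the
# predicted class (class_id - 1).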
def add_To_Conf_Matrix(result,classNo):
if (len(result)>0):
valueOfPredict = result[0]  # predicted class id of the top detection
conf_matrix[classNo-1][valueOfPredict-1] = conf_matrix[classNo-1][valueOfPredict-1] +1
returnVal= True
else:
returnVal= False
return returnVal
totalFile= 0
totalLoss =0
for folderName in range (1,13):
tempdir =current_dir+"/"+str(folderName)
dir_len = len([f for f in glob.glob(tempdir + "**/*.jpg", recursive=True)])
for pathAndFilename in glob.iglob(os.path.join(tempdir, "*.jpg")):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
image = skimage.io.imread(pathAndFilename)
results = model.detect([image]) # , verbose=1)
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
classNames, r['scores'],
colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])
print("Sınıflar: ", r['class_ids'], " Scores:", r['scores'])
# plt.show()
plt.savefig(current_dir+"/Sonuç_resimler/"+str(folderName)+"/"+title+".png")
# print(result)
returnValue = add_To_Conf_Matrix(r['class_ids'],folderName)
if (returnValue==True):
totalFile=totalFile +1
else:
totalLoss = totalLoss +1
# Normalize the confusion matrix.
row_sums = conf_matrix.sum(axis=1)
conf_matrix_normed = (conf_matrix / row_sums[:, np.newaxis]) *100
#Accuracy Computing
sumsTotal = conf_matrix.sum(axis=0)
for i in range(0,12):
temp_dict={}
temp_TP = (conf_matrix[i][i])
temp_FP = (sumsTotal[i]-temp_TP)
temp_Precision = temp_TP/(temp_FP+temp_TP)
# temp_dict.update({"ClassLabel":classes[i]})
temp_dict.update({'TP':temp_TP})
temp_dict.update({'FP':temp_FP})
temp_dict.update({'mAP':temp_Precision})
listOfValues.append(temp_dict)
sumOfPR = 0
index = 0;
np.save(mainFolder+confMatrixFile,conf_matrix,True,True)
np.save(mainFolder+confMatrixNormalizedFile,conf_matrix_normed,True,True)
########################### Average mAP is computed and written ###############
import math
for i in range(0, len(listOfValues)):
if (math.isnan(listOfValues[i]['mAP'])):
continue
else:
sumOfPR = sumOfPR + listOfValues[i]['mAP']
index= index +1
dictAvgMap={}
mAP = sumOfPR /index
dictAvgMap.update({"avgMap":mAP})
listOfValues.append(dictAvgMap)
############################ Values are written to the file #############
import pandas as pd
df = pd.DataFrame(listOfValues)  # api: pandas.DataFrame
# Steinbeck.py is a python program designed specifically for
# pulling time series data from the Johns Hopkins University
# COVID-19 github and turning them in to usable timeseries
# csv's for analysis.
# @author <NAME>, <EMAIL>
# This program written and produced for and by Cloud Brigade
import pandas as pd
import numpy as np
import datetime
import boto3
from io import StringIO
BUCKETNAME = 'covid-v1-part-3-data-bucket'
def timify(data):
'''
returns a usable national dataframe using inputs from the JHU timeseries data set,
and second item it returns is a dataframe with the population of each county
data is a pandas dataframe
'''
# create a county_state column
df = data.copy()
county_state = df['Admin2'] + " County, " + df['Province_State']
county_state = pd.DataFrame(county_state)
county_state.columns = ['county_state']
# create a population flag to tell the function whether or not to return
# a population dataframe too
pop_flag=False
# remove redundant columns
remove_cols = ['UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Admin2',
'Province_State','Country_Region', 'Lat', 'Long_',
'Combined_Key']
df_2 = df.copy()
if 'Population' in df.columns:
pop_flag=True
remove_cols.append('Population')
df_2 = pd.merge(county_state, df_2['Population'].to_frame(), how='outer', left_index=True, right_index=True)
df_2 = df_2.set_index('county_state')
df.drop(columns=remove_cols, inplace=True)
#place the county_state column in the front
df = county_state.merge(df, how='outer', left_index=True, right_index=True)
if pop_flag: return df, df_2
return df
def localize(data, topic, county_state):
'''
returns a localized timeseries dataframe using inputs from timify function
df is a pandas dataframe
topic is a string, either cases or deaths for the dataframe
county_state is a string, the county and state of the time series dataframe
'''
df = data.copy()
# Break the needed data row away from the larger dataframe
local = df[df['county_state'] == county_state]
# Make the county_state the index
local.set_index('county_state', inplace=True)
# use df.loc to pull the pd.Series, flipping the axes, then make it a df again
local = pd.DataFrame(local.loc[county_state])
# Create a date column in order to change types
# (this is a hacky way to do it)
local['date'] = local.index
# convert date column to datetime type
local['date'] = pd.to_datetime(local['date'])
# make the datetime column the index
local.set_index(['date'], inplace=True)
# change the column name to the desired topic of information
local.columns=[topic]
return local
def pull_raw(datapath='/home/ubuntu/covpro/data/COVID-19/csse_covid_19_data/csse_covid_19_time_series/'):
'''
returns two dataframes, cases and deaths in that order, from the JHU git repo
datapth is the path on the local machine to the daily updated JHU git repo
'''
# Match destination files to variables
case_ts = 'time_series_covid19_confirmed_US.csv'
death_ts = 'time_series_covid19_deaths_US.csv'
# Read in the confirmed_cases and deaths data sets
# cases = pd.read_csv(datapath + case_ts)
# deaths = pd.read_csv(datapath + death_ts)
return pd.read_csv(datapath + case_ts)  # api: pandas.read_csv
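# Illustrative usage sketch of how the helpers above compose; the directory and the
# county name are assumptions chosen only for demonstration.
#
#   jhu_dir = '/path/to/COVID-19/csse_covid_19_data/csse_covid_19_time_series/'
#   raw_cases = pd.read_csv(jhu_dir + 'time_series_covid19_confirmed_US.csv')
#   national = timify(raw_cases)      # no Population column, so a single frame is returned
#   local = localize(national, 'cases', 'Santa Cruz County, California')
#   # local is a datetime-indexed frame with one 'cases' column of cumulative counts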
# %%
import pandas as pd
from pandas.api.types import union_categoricals
import numpy as np
from utils.config import Config
#%%
c = Config()
c.validate_files()
# %%
df = pd.read_csv(c.predicted_300K)
# %%
df = df[["Monolayer 1", "Monolayer 2"]]
# %%
cats1 = pd.unique(df.to_numpy().ravel())
print(f"size of cats1: {cats1.size}")
# %%
df2 = pd.read_csv(c.predicted_18M)
# %%
df2 = df2[['monolayer1', 'monolayer2']]
# %%
cats2 = pd.unique(df2.to_numpy().ravel())
print(f"size of cats2: {cats2.size:,}")
#%%
# merge cats1 and cats2 series
cats = pd.merge(
pd.Series(cats2, name='monolayer'),
pd.Series(cats1, name='monolayer')  # api: pandas.Series
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import wikipedia
import musicbrainzngs
import urllib.request
import urllib.request as urllib2
import urllib.parse
import json
import requests
from bs4 import BeautifulSoup
import re
import h5py
import time
import datetime
import pandas as pd
import pickle
import pycountry
import random
from shutil import copyfile
import logging
import argparse
import os
import glob
import API_KEYS
SPOTIFY_CLIENT_ID = API_KEYS.SPOTIFY_CLIENT_ID
SPOTIFY_CLIENT_SECRET = API_KEYS.SPOTIFY_CLIENT_SECRET
LAST_FM_API_KEY = API_KEYS.LAST_FM_API_KEY
LAST_FM_SHARED_SECRET = API_KEYS.LAST_FM_SHARED_SECRET
LAST_FM_REGISTERED_TO = API_KEYS.LAST_FM_REGISTERED_TO
LAST_FM_API_KEY2 = API_KEYS.LAST_FM_API_KEY2
LAST_FM_SHARED_SECRET2 = API_KEYS.LAST_FM_SHARED_SECRET2
LAST_FM_REGISTERED_TO2 = API_KEYS.LAST_FM_REGISTERED_TO2
GENIUS_CLIENT_ID = API_KEYS.GENIUS_CLIENT_ID
GENIUS_CLIENT_SECRET = API_KEYS.GENIUS_CLIENT_SECRET
GENIUS_CLIENT_ACCESS_TOKEN = API_KEYS.GENIUS_CLIENT_ACCESS_TOKEN
MM_API_KEY = API_KEYS.MM_API_KEY
MB_CLIENT_ID = API_KEYS.MB_CLIENT_ID
MB_SECRET = API_KEYS.MB_SECRET
MAPS_API_KEY = API_KEYS.MAPS_API_KEY
LYRICS_FOUND_BY_MM = '681F1AF6-8A1A-4493-8020-E44E2006ADB1***LYRICS_FOUND_BY_MM***361E1163-EE9C-444D-874D-7E0D438EF459'
NOW = datetime.datetime.now()
NOW = str(NOW.month) + '_' + str(NOW.day) + '_' + str(NOW.hour) + '_' + str(NOW.minute)
logging.basicConfig(filename='./dumps/' + NOW + '_.log', format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
musicbrainzngs.set_useragent('haamr', 1.0)
client_credentials_manager = SpotifyClientCredentials(client_id=SPOTIFY_CLIENT_ID, client_secret=SPOTIFY_CLIENT_SECRET)
SPOTIFY = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
GENRE_LOC = pd.read_csv(
'./external_datasets/genre_places.csv',
index_col='genre'
)
with open('./external_datasets/external_data.pickle', 'rb') as f:
save = pickle.load(f)
LYRICS = save['LYRICS']
WORLD_CITIES = save['WORLD_CITIES']
MSD_ARTIST_LOCATION = save['MSD_ARTIST_LOCATION']
del save
parser = argparse.ArgumentParser(description="scrapes various apis for music content")
parser.add_argument('-n', '--num-seed-artists', default=0, help='number of seed_artists to scrape')
parser.add_argument('-c', '--random', default=False, help='grab random seed artists rather than from the top')
parser.add_argument('-s', '--seeds', default=None, help='injects seed artists via comma separated list')
parser.add_argument('-b', '--seeds-bolster', default=False, help='use bolster seed artists list instead of seed artist list')
parser.add_argument('-m', '--merge-seeds-bolster', default=False, help='merge the bolster list with the seeds list')
parser.add_argument('-t', '--seeds-top', default=False, help='inject seeds at the top of the list')
parser.add_argument('-r', '--seeds-reset', default=False, help='reset seed artists that failed so they can run a second time')
parser.add_argument('-u', '--set-seed-unscraped', default=None, help='sets a seed as unscraped')
args = parser.parse_args()
####### Utility #######
def printlog(message, e=False):
print(message)
if e:
logging.exception(message)
else:
logging.info(message)
def get_dataframes():
if os.path.isfile('./data.pickle'):
with open('./data.pickle', 'rb') as f:
save = pickle.load(f)
artists = save['artists']
future_artists = save['future_artists']
seed_artists = save['seed_artists']
bolster_artists = save['bolster_artists']
albums = save['albums']
tracks = save['tracks']
del save
else:
# id: {name: '', genres: [], related: [], lat: 0.0, lng: 0.0}
col = ['name', 'genres', 'related', 'lat', 'lng']
artists = pd.DataFrame(columns=col)
future_artists = pd.DataFrame(columns=col)
# id: {has_been_scraped: bool}
col = ['has_been_scraped']
seed_artists = pd.DataFrame(columns=col)
# id: {has_been_scraped: bool}
col = ['has_been_scraped']
bolster_artists = pd.DataFrame(columns=col)
# id: {artist_id: '', name: '', release_date: '', release_date_precision: ''}
col = ['artist_id', 'name', 'release_date', 'release_date_precision']
albums = pd.DataFrame(columns=col)
# id: {artist_id: '', album_id: '', name: ''}
col = ['artist_id', 'album_id', 'name']
tracks = pd.DataFrame(columns=col)
return artists, future_artists, seed_artists, bolster_artists, albums, tracks
def backup_dataframes():
if os.path.isfile('./data.pickle'):
printlog('Backing up data file...')
copyfile('./data.pickle', './backups/' + NOW + '_' + 'data.pickle')
def save_dataframes(artists, future_artists, seed_artists, bolster_artists, albums, tracks):
backup_dataframes()
with open('./data.pickle', 'wb') as f:
save = {
'artists': artists,
'future_artists': future_artists,
'seed_artists': seed_artists,
'bolster_artists': bolster_artists,
'albums': albums,
'tracks': tracks
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
del save
printlog('data.pickle saved succesfully!')
def reset_failed_seed_artists(seed, artists):
seed.loc[set(seed.loc[seed.has_been_scraped == True].index) - set(artists.index)] = False
####### Data #######
def get_new_random_artist_from_lyrics(df_artists):
new_artist = random.choice(list(set(LYRICS.artist.values)))
if new_artist not in [name.lower() for name in df_artists.name.values]:
return LYRICS.loc[LYRICS.artist == new_artist]
else:
return get_new_random_artist_from_lyrics(df_artists=df_artists)
def inject_seed_artists(df, list_ids, top=False, dfb=None, bolster=False):
if bolster:
for i in list_ids:
if i in df.index:
if not df.loc[i].has_been_scraped:
dfb.loc[i] = False
else:
dfb.loc[i] = False
elif top:
df_top = pd.DataFrame(columns=['has_been_scraped'])
for i in list_ids:
df_top.loc[i] = False
return pd.concat([df_top, df])[~pd.concat([df_top, df]).index.duplicated(keep='first')]
else:
for i in list_ids:
if i not in df.index:
df.loc[i] = False
def get_next_seed_artist(seed_artists, r=False):
if r:
try:
return random.choice(seed_artists.loc[seed_artists.has_been_scraped == False].index.values)
except IndexError:
return -1
else:
try:
return seed_artists.loc[seed_artists.has_been_scraped == False].index.values[0]
except IndexError:
return -1
def mark_seed_as_scraped(df, seed_artist_id, dfb, bolster):
df.loc[seed_artist_id] = True
if bolster:
dfb.loc[seed_artist_id] = True
def mark_seed_as_unscraped(df, seed_artist_id):
df.loc[seed_artist_id] = False
def add_artist(df, artist_id, name, genres, related, lat, lng):
if artist_id not in df.index:
df.loc[artist_id] = [name, genres, related, lat, lng]
def add_track(df, track_id, artist_id, album_id, name):
if track_id not in df.index:
df.loc[track_id] = [artist_id, album_id, name]
def add_albums(df, album_id, artist_id, name, release_date, release_date_precision):
if album_id not in df.index:
df.loc[album_id] = [artist_id, name, release_date, release_date_precision]
####### Artist #######
def fr_get_related(res):
col = ['name', 'genres', 'related', 'lat', 'lng']
artists = pd.DataFrame(columns=col)
for artist in res['artists']:
df = fr_get_artist_metadata(res=artist)
artists.loc[artist['id']] = df.loc[artist['id']]
return artists
def fr_get_artist_metadata(res):
col = ['name', 'genres', 'related', 'lat', 'lng']
artist = pd.DataFrame(columns=col)
printlog(
str(res['id']) +
' : ' + str(res['name']) +
' : ' + str(res['genres']) +
' : ??' + ' : ??'
)
artist.loc[res['id']] = [res['name'], res['genres'], None, None, None]
return artist
####### Lyrics #######
def clean_song_title(song_title):
song_title = re.sub(re.compile(r' -.*Radio.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Cut.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Version.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Mix.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Extended.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Edit.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Remix.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Remastered.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Live.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Session.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*B-Side.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' -.*Bonus.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' \(feat.*', re.IGNORECASE), '', song_title)
song_title = re.sub(re.compile(r' \(from.*', re.IGNORECASE), '', song_title)
return re.sub(re.compile(r' \(.*Version.*\)', re.IGNORECASE), '', song_title)
def clean_lyrics(lyrics):
lyrics = re.sub(re.compile(r'\[Produced by.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Interlude.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Chorus.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Verse.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Chorus.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Pre-Chorus.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Bridge.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Outro.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Pre.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Hook.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Sample.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Refrain.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Intro.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Part.*\]', re.IGNORECASE), '', lyrics)
lyrics = re.sub(re.compile(r'\[Breakdown.*\]', re.IGNORECASE), '', lyrics)
return re.sub(re.compile(r'^(\n)*|(\n)*$'), '', lyrics)
def get_lyrics_genius(song_title, artist_name):
base_url = 'https://api.genius.com'
headers = {'Authorization': 'Bearer ' + GENIUS_CLIENT_ACCESS_TOKEN}
search_url = base_url + '/search'
data = {'q': song_title + ' ' + artist_name}
response = requests.get(search_url, data=data, headers=headers)
json = response.json()
remote_song_info = None
hits = json['response']['hits']
for hit in hits:
if artist_name.lower() in hit['result']['primary_artist']['name'].lower():
remote_song_info = hit
break
if remote_song_info:
song_url = remote_song_info['result']['url']
page = requests.get(song_url)
html = BeautifulSoup(page.text, 'html.parser')
lyrics = html.find('div', class_='lyrics').get_text()
return clean_lyrics(lyrics)
def get_lyrics_wikia(song_title, artist_name):
url = 'http://lyric-api.herokuapp.com/api/find/' + artist_name.replace(' ', '%20') + '/' + song_title.replace(' ', '%20')
return json.load(urllib2.urlopen(url))['lyric']
def get_lyrics_az(song_title, artist_name):
artist_name = artist_name.lower()
song_title = song_title.lower()
# remove all except alphanumeric characters from artist_name and song_title
artist_name = re.sub('[^A-Za-z0-9]+', "", artist_name)
song_title = re.sub('[^A-Za-z0-9]+', "", song_title)
# remove starting 'the' from artist_name e.g. the who -> who
if artist_name.startswith("the"):
artist_name = artist_name[3:]
url = "http://azlyrics.com/lyrics/"+artist_name+"/"+song_title+".html"
content = urllib.request.urlopen(url).read()
soup = BeautifulSoup(content, 'html.parser')
lyrics = str(soup)
# lyrics lies between up_partition and down_partition
up_partition = '<!-- Usage of azlyrics.com content by any third-party lyrics provider is prohibited by our licensing agreement. Sorry about that. -->'
down_partition = '<!-- MxM banner -->'
lyrics = lyrics.split(up_partition)[1]
lyrics = lyrics.split(down_partition)[0]
lyrics = lyrics.replace('<br>','').replace('<br/>','').replace('</br>','').replace('</div>','').strip()
return lyrics
def get_lyrics_mm(song_title, artist_name):
base_url = "https://api.musixmatch.com/ws/1.1/" + "matcher.lyrics.get" + "?format=json&callback=callback"
artist_search = "&q_artist=" + artist_name
song_search = "&q_track=" + song_title
api = "&apikey=" + MM_API_KEY
api_call = base_url + artist_search + song_search + api
request = requests.get(api_call)
data = request.json()
data = data['message']['body']
return data['lyrics']['lyrics_body'][:-70]
def get_track_lyrics(song_title, artist_name):
printlog(f'get_track_lyrics({song_title}, {artist_name})')
if len(song_title) != len(clean_song_title(song_title)):
printlog(f'Track lyric cleaned to "{clean_song_title(song_title)}"')
song_title = clean_song_title(song_title)
ytb = 'Lyrics for this song have yet to be'
asl = 'Your name will be printed as part of the credit when your lyric is approved'
unknown = '[?]'
nl = 'we are not licensed to display the full lyrics for this song'
printlog('Try Genius...')
try:
lyrics = get_lyrics_genius(song_title, artist_name)
if len(lyrics) == 0:
raise Exception('Lyrics empty')
if len(lyrics) >= 15000:
raise Exception('Lyrics too big')
if ytb in lyrics or asl in lyrics or unknown in lyrics or nl in lyrics:
raise Exception('Lyrics yet to be released')
except Exception:
printlog('Genius lyrics not found, try LYRICS data...', e=True)
try:
lyrics = LYRICS.loc[LYRICS.artist == artist_name.lower()].\
loc[LYRICS.song == song_title.lower()].lyrics.values[0]
lyrics = clean_lyrics(lyrics)
if len(lyrics) == 0:
raise Exception('Lyrics empty')
if len(lyrics) >= 15000:
raise Exception('Lyrics too big')
except Exception:
printlog('LYRICS lyrics not found, try wikia...', e=True)
try:
lyrics = get_lyrics_wikia(song_title, artist_name)
if len(lyrics) == 0:
raise Exception('Lyrics empty')
if len(lyrics) >= 15000:
raise Exception('Lyrics too big')
except Exception:
printlog('wikia lyrics not found, try AZ...', e=True)
try:
lyrics = get_lyrics_az(song_title, artist_name)
if len(lyrics) == 0:
raise Exception('Lyrics empty')
if len(lyrics) >= 15000:
raise Exception('Lyrics too big')
except Exception:
printlog('AZ lyrics not found, try mm...', e=True)
try:
lyrics = get_lyrics_mm(song_title, artist_name)
if len(lyrics) == 0:
raise Exception('Lyrics empty')
if len(lyrics) >= 15000:
raise Exception('Lyrics too big')
lyrics = lyrics + '\n\n' + LYRICS_FOUND_BY_MM
except Exception:
printlog('No lyrics found, exit', e=True)
return None
return lyrics
####### Track #######
def get_earliest_album(artists_name, track_name, year, track_id, artist_id, album_id):
q='artist:' + artists_name + ' track:' + clean_song_title(track_name)
results = SPOTIFY.search(q=q, limit=20, type='track')
track = None
for t in results['tracks']['items']:
if -60 < (int(t['album']['release_date'][:4]) - year) < -10 \
and t['artists'][0]['id'] == artist_id \
and t['id'] != track_id \
and t['album']['id'] != album_id \
and int(t['album']['release_date'][:4]) > 1910:
year = int(t['album']['release_date'][:4])
if 'Live' not in t['name'] and 'Remix' not in t['name']:
track = t
if track is not None:
for t in results['tracks']['items']:
if (int(t['album']['release_date'][:4]) - year) < 0 \
and t['artists'][0]['id'] == artist_id \
and t['id'] != track_id \
and t['album']['id'] != album_id \
and int(t['album']['release_date'][:4]) > 1910:
year = int(t['album']['release_date'][:4])
if 'Live' not in t['name'] and 'Remix' not in t['name']:
track = t
return track
def compare_lyrics(lyrics_list, lyrics):
for _, v in lyrics_list.items():
if v == lyrics:
return False
return True
def fr_get_top_tracks_albums(df_tracks, df_albums, country, artist_name, artist_id):
# id: {artist_id: '', name: '', release_date: '', release_date_precision: ''}
col = ['artist_id', 'name', 'release_date', 'release_date_precision']
albums = pd.DataFrame(columns=col)
# id: {artist_id: '', album_id: '', name: ''}
col = ['artist_id', 'album_id', 'name']
tracks = pd.DataFrame(columns=col)
lyrics_list = {}
previews = {}
album_covers = {}
if country is not None and country not in ['XK', 'US']:
res_home = SPOTIFY.artist_top_tracks(seed_artist_id, country=country)
for track in res_home['tracks']:
try:
t = get_earliest_album(
artist_name,
track['name'],
int(track['album']['release_date'][:4]),
track['id'],
artist_id,
track['album']['id']
)
if t is not None:
track = t
except:
pass
if track['preview_url'] is None:
continue
if track['id'] in df_tracks.index:
continue
if track['name'] in df_tracks.loc[df_tracks.artist_id == artist_id].name.values:
continue
try:
lyrics = get_track_lyrics(track['name'], artist_name)
except Exception:
continue
if lyrics is not None and compare_lyrics(lyrics_list, lyrics):
printlog(
str(track['id']) +
' : ' + str(track['name'])
)
tracks.loc[track['id']] = [artist_id, track['album']['id'], track['name']]
if track['album']['id'] not in albums.index and track['album']['id'] not in df_albums.index:
printlog(
str(track['album']['id']) +
' : ' + str(track['album']['name']) +
' : ' + str(track['album']['release_date']) +
' : ' + str(track['album']['release_date_precision'])
)
albums.loc[track['album']['id']] = [
artist_id,
track['album']['name'],
track['album']['release_date'],
track['album']['release_date_precision']
]
lyrics_list[track['id']] = lyrics
previews[track['id']] = track['preview_url']
if track['album']['id'] not in df_albums.index:
if track['album']['id'] not in album_covers:
try:
album_covers[track['album']['id']] = track['album']['images'][0]['url']
except:
album_covers[track['album']['id']] = None
elif album_covers[track['album']['id']] is None:
try:
album_covers[track['album']['id']] = track['album']['images'][0]['url']
except:
album_covers[track['album']['id']] = None
res_US = SPOTIFY.artist_top_tracks(seed_artist_id, country='US')
for track in res_US['tracks']:
try:
t = get_earliest_album(
artist_name,
track['name'],
int(track['album']['release_date'][:4]),
track['id'],
artist_id,
track['album']['id']
)
if t is not None:
track = t
except:
pass
if track['preview_url'] is None:
continue
if track['id'] in df_tracks.index:
continue
if track['name'] in df_tracks.loc[df_tracks.artist_id == artist_id].name.values:
continue
if track['name'] in tracks.name.values:
continue
if track['id'] in previews:
if previews[track['id']] is None:
try:
lyrics = get_track_lyrics(track['name'], artist_name)
except Exception:
continue
if lyrics is not None and compare_lyrics(lyrics_list, lyrics):
printlog(
str(track['id']) +
' : ' + str(track['name'])
)
tracks.loc[track['id']] = [artist_id, track['album']['id'], track['name']]
if track['album']['id'] not in albums.index and track['album']['id'] not in df_albums.index:
printlog(
str(track['album']['id']) +
' : ' + str(track['album']['name']) +
' : ' + str(track['album']['release_date']) +
' : ' + str(track['album']['release_date_precision'])
)
albums.loc[track['album']['id']] = [
artist_id,
track['album']['name'],
track['album']['release_date'],
track['album']['release_date_precision']
]
lyrics_list[track['id']] = lyrics
previews[track['id']] = track['preview_url']
if track['album']['id'] not in df_albums.index:
if track['album']['id'] not in album_covers:
try:
album_covers[track['album']['id']] = track['album']['images'][0]['url']
except:
album_covers[track['album']['id']] = None
elif album_covers[track['album']['id']] is None:
try:
album_covers[track['album']['id']] = track['album']['images'][0]['url']
except:
album_covers[track['album']['id']] = None
else:
try:
lyrics = get_track_lyrics(track['name'], artist_name)
except Exception:
continue
if lyrics is not None and compare_lyrics(lyrics_list, lyrics):
printlog(
str(track['id']) +
' : ' + str(track['name'])
)
tracks.loc[track['id']] = [artist_id, track['album']['id'], track['name']]
if track['album']['id'] not in albums.index and track['album']['id'] not in df_albums.index:
printlog(
str(track['album']['id']) +
' : ' + str(track['album']['name']) +
' : ' + str(track['album']['release_date']) +
' : ' + str(track['album']['release_date_precision'])
)
albums.loc[track['album']['id']] = [
artist_id,
track['album']['name'],
track['album']['release_date'],
track['album']['release_date_precision']
]
lyrics_list[track['id']] = lyrics
previews[track['id']] = track['preview_url']
if track['album']['id'] not in df_albums.index:
if track['album']['id'] not in album_covers:
try:
album_covers[track['album']['id']] = track['album']['images'][0]['url']
except:
album_covers[track['album']['id']] = None
elif album_covers[track['album']['id']] is None:
try:
album_covers[track['album']['id']] = track['album']['images'][0]['url']
except:
album_covers[track['album']['id']] = None
return tracks, albums, lyrics_list, previews, album_covers
def fr_get_all_tracks(country, artist_name, artist_id):
# id: {artist_id: '', name: '', release_date: '', release_date_precision: ''}
col = ['artist_id', 'name', 'release_date', 'release_date_precision']
albums = pd.DataFrame(columns=col)  # api: pandas.DataFrame
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from transformers import BertModel, BertTokenizer
def encode_query(text, tokenizer, model, device='cpu'):
max_length = 36 # hardcode for now
inputs = tokenizer(
'[CLS] [Q] ' + text + ' [MASK]' * max_length,
max_length=max_length,
truncation=True,
add_special_tokens=False,
return_tensors='pt'
)
inputs.to(device)
outputs = model(**inputs)
embeddings = outputs.last_hidden_state.detach().cpu().numpy()
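# Average the token embeddings from position 4 onward (presumably skipping the
# '[CLS] [Q] ' prefix), pooling the query-text tokens and the appended [MASK]
# slots into one fixed-size query vector.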
return np.average(embeddings[:, 4:, :], axis=-2).flatten()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--encoder', type=str, help='encoder name or path', required=True)
parser.add_argument('--input', type=str, help='query file to be encoded.', required=True)
parser.add_argument('--output', type=str, help='path to store query embeddings', required=True)
parser.add_argument('--device', type=str,
help='device cpu or cuda [cuda:0, cuda:1...]', default='cpu', required=False)
args = parser.parse_args()
tokenizer = BertTokenizer.from_pretrained(args.encoder)
model = BertModel.from_pretrained(args.encoder)
model.to(args.device)
embeddings = {'id': [], 'text': [], 'embedding': []}
for line in tqdm(open(args.input, 'r').readlines()):
qid, text = line.rstrip().split('\t')
qid = qid.strip()
text = text.strip()
embeddings['id'].append(qid)
embeddings['text'].append(text)
embeddings['embedding'].append(encode_query(text, tokenizer, model, args.device))
embeddings = pd.DataFrame(embeddings)  # api: pandas.DataFrame
import os
import pandas as pd
import pytest
from pytest_mock import MockerFixture
from src import config
from src.selection import select_data
class TestFilter2017and2014and2011:
def test_data_frame(self):
code = select_data.COMPUTER_SCIENCE_CODE_2017_2014_2011
input_df = pd.DataFrame(columns=["dados", "CO_GRUPO"],
data=[["1", code],
["2", code+1],
["3", code -1]])
expected_df = pd.DataFrame(columns=["dados", "CO_GRUPO"],
data=[["1", code]])
output_df = select_data.filter_computer_science_2017_2014_2011(input_df)
assert output_df.equals(expected_df)
class TestFilter2008:
def test_data_frame(self):
code = select_data.COMPUTER_SCIENCE_CODE_2008
input_df = pd.DataFrame(columns=["dados", "co_subarea"],
data=[["1", code],
["2", code+1],
["3", code-1]])
expected_df = pd.DataFrame(columns=["dados", "co_subarea"],
data=[["1", code]])
output_df = select_data.filter_computer_science_2008(input_df)
assert output_df.equals(expected_df)
class TestGetComputerScienceAnswerKey2005:
def test_raise_value_error(self) -> None:
input_series = pd.Series(["...", ".."])
with pytest.raises(ValueError):
select_data.get_computer_science_answer_key_2005(input_series)
def test_example(self) -> None:
input_series = pd.Series([".ad..", "..d.."])  # api: pandas.Series
import matplotlib.pyplot as plt
import datetime as datetime
import numpy as np
import pandas as pd
import talib
import seaborn as sns
from time import time
from sklearn import preprocessing
from pandas.plotting import register_matplotlib_converters
from .factorize import FactorManagement
import scipy.stats as stats
import cvxpy as cvx
import zipfile
import os
from sklearn import linear_model, decomposition, ensemble, preprocessing, isotonic, metrics
from sklearn.impute import SimpleImputer
import xgboost
register_matplotlib_converters()
class Learner:
def __init__(self):
pass
@staticmethod
def shift_mask_data(X, Y, upper_percentile, lower_percentile, n_fwd_days):
# Shift X to match factors at t to returns at t+n_fwd_days (we want to predict future returns after all)
shifted_X = np.roll(X, n_fwd_days + 1, axis=0)
# Slice off rolled elements
X = shifted_X[n_fwd_days + 1:]
Y = Y[n_fwd_days + 1:]
n_time, n_stocks, n_factors = X.shape
# Look for biggest up and down movers
upper = np.nanpercentile(Y, upper_percentile, axis=1)[:, np.newaxis]
lower = np.nanpercentile(Y, lower_percentile, axis=1)[:, np.newaxis]
upper_mask = (Y >= upper)
lower_mask = (Y <= lower)
mask = upper_mask | lower_mask # This also drops nans
mask = mask.flatten()
# Only try to predict whether a stock moved up/down relative to other stocks
Y_binary = np.zeros(n_time * n_stocks)
Y_binary[upper_mask.flatten()] = 1
Y_binary[lower_mask.flatten()] = -1
# Flatten X
X = X.reshape((n_time * n_stocks, n_factors))
# Drop stocks that did not move much (i.e. are in the 30th to 70th percentile)
X = X[mask]
Y_binary = Y_binary[mask]
return X, Y_binary
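# Shape sketch for shift_mask_data (illustrative): X is (n_time, n_stocks, n_factors)
# of factor values and Y is (n_time, n_stocks) of forward returns; the method returns
# a flattened (n_selected, n_factors) matrix plus a +1/-1 label vector for the
# extreme movers.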
def feature_importance_adaboost(self, n_fwd_days, close, all_factors, n_estimators, train_size,
upper_percentile, lower_percentile):
pipe = all_factors
pipe.index = pipe.index.set_levels([pd.to_datetime(pipe.index.levels[0]), pipe.index.levels[1]])
close = close[pipe.index.levels[1]]
close.index = pd.to_datetime(close.index)  # api: pandas.to_datetime
from datetime import timedelta
import operator
from typing import Any, Callable, List, Optional, Sequence, Union
import numpy as np
from pandas._libs.tslibs import (
NaT,
NaTType,
frequencies as libfrequencies,
iNaT,
period as libperiod,
)
from pandas._libs.tslibs.fields import isleapyear_arr
from pandas._libs.tslibs.period import (
DIFFERENT_FREQ,
IncompatibleFrequency,
Period,
get_period_field_arr,
period_asfreq_arr,
)
from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
_TD_DTYPE,
ensure_object,
is_datetime64_dtype,
is_float_dtype,
is_period_dtype,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.generic import (
ABCIndexClass,
ABCPeriodArray,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.algorithms as algos
from pandas.core.arrays import datetimelike as dtl
import pandas.core.common as com
from pandas.tseries import frequencies
from pandas.tseries.offsets import DateOffset, Tick, _delta_to_tick
def _field_accessor(name, alias, docstring=None):
def f(self):
base, mult = libfrequencies.get_freq_code(self.freq)
result = get_period_field_arr(alias, self.asi8, base)  # api: pandas._libs.tslibs.period.get_period_field_arr
import re
from datetime import datetime, timedelta
import numpy as np
import pandas.compat as compat
import pandas as pd
from pandas.compat import u, StringIO
from pandas.core.base import FrozenList, FrozenNDArray, DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assert_isinstance
from pandas import Series, Index, Int64Index, DatetimeIndex, PeriodIndex
from pandas import _np_version_under1p7
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container)
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
raise nose.SkipTest('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container)
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# pass whatever functions you normally would to assertRaises (after the Exception kind)
assertRaisesRegexp(TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem(): self.container[0] = 5
self.check_mutable_error(setitem)
def setslice(): self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem(): del self.container[0]
self.check_mutable_error(delitem)
def delslice(): del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert_isinstance(result, klass)
self.assertEqual(result, expected)
class TestFrozenList(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin, tm.TestCase):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setUp(self):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert_isinstance(self.container.view(), FrozenNDArray)
self.assertFalse(isinstance(self.container.view(np.ndarray), FrozenNDArray))
self.assertIsNot(self.container.view(), self.container)
self.assert_numpy_array_equal(self.container, original)
# shallow copy should be the same too
assert_isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container): container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
self.assert_numpy_array_equal(original, vals)
self.assertIsNot(original, vals)
vals[0] = n
self.assert_numpy_array_equal(self.container, original)
self.assertEqual(vals[0], n)
class Ops(tm.TestCase):
def setUp(self):
self.int_index = tm.makeIntIndex(10)
self.float_index = tm.makeFloatIndex(10)
self.dt_index = tm.makeDateIndex(10)
self.dt_tz_index = tm.makeDateIndex(10).tz_localize(tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10)
self.string_index = tm.makeStringIndex(10)
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index)
self.float_series = Series(arr, index=self.int_index)
self.dt_series = Series(arr, index=self.dt_index)
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index)
self.string_series = Series(arr, index=self.string_index)
types = ['int','float','dt', 'dt_tz', 'period','string']
self.objs = [ getattr(self,"{0}_{1}".format(t,f)) for t in types for f in ['index','series'] ]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index,op),index=o.index)
else:
expected = getattr(o,op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o,op)
# these could be series, arrays or scalars
if isinstance(result,Series) and isinstance(expected,Series):
tm.assert_series_equal(result,expected)
elif isinstance(result,Index) and isinstance(expected,Index):
tm.assert_index_equal(result,expected)
elif isinstance(result,np.ndarray) and isinstance(expected,np.ndarray):
self.assert_numpy_array_equal(result,expected)
else:
self.assertEqual(result, expected)
# freq raises AttributeError on an Int64Index because its not defined
# we mostly care about Series hwere anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError, otherwise
# an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
self.assertRaises(TypeError, lambda : getattr(o,op))
else:
self.assertRaises(AttributeError, lambda : getattr(o,op))
class TestIndexOps(Ops):
def setUp(self):
super(TestIndexOps, self).setUp()
self.is_valid_objs = [ o for o in self.objs if o._allow_index_ops ]
self.not_valid_objs = [ o for o in self.objs if not o._allow_index_ops ]
def test_ops(self):
tm._skip_if_not_numpy17_friendly()
for op in ['max','min']:
for o in self.objs:
result = getattr(o,op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(ordinal=getattr(o.values, op)(), freq=o.freq)
try:
self.assertEqual(result, expected)
except ValueError:
# comparing tz-aware series with np.array results in ValueError
expected = expected.astype('M8[ns]').astype('int64')
self.assertEqual(result.value, expected)
def test_nanops(self):
# GH 7261
for op in ['max','min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
self.assertEqual(getattr(obj, op)(), 2.0)
obj = klass([np.nan])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
self.assertEqual(getattr(obj, op)(), datetime(2011, 11, 1))
def test_value_counts_unique_nunique(self):
for o in self.objs:
klass = type(o)
values = o.values
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
# freq must be specified because repeat makes freq ambiguous
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
expected_s = Series(range(10, 0, -1), index=values[::-1], dtype='int64')
tm.assert_series_equal(o.value_counts(), expected_s)
if isinstance(o, DatetimeIndex):
# DatetimeIndex.unique returns DatetimeIndex
self.assertTrue(o.unique().equals(klass(values)))
else:
self.assert_numpy_array_equal(o.unique(), values)
self.assertEqual(o.nunique(), len(np.unique(o.values)))
for null_obj in [np.nan, None]:
for o in self.objs:
klass = type(o)
values = o.values
if o.values.dtype == 'int64':
# skip int64 because it doesn't allow nan or None to be included
continue
if o.values.dtype == 'datetime64[ns]' and _np_version_under1p7:
# Unable to assign None
continue
# special assign to the numpy array
if o.values.dtype == 'datetime64[ns]':
values[0:2] = pd.tslib.iNaT
else:
values[0:2] = null_obj
# create repeated values, 'n'th element is repeated by n+1 times
if isinstance(o, PeriodIndex):
o = klass(np.repeat(values, range(1, len(o) + 1)), freq=o.freq)
else:
o = klass(np.repeat(values, range(1, len(o) + 1)))
if isinstance(o, DatetimeIndex):
expected_s_na = Series(list(range(10, 2, -1)) + [3], index=values[9:0:-1])
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1])
else:
expected_s_na = Series(list(range(10, 2, -1)) +[3], index=values[9:0:-1], dtype='int64')
expected_s = Series(list(range(10, 2, -1)), index=values[9:1:-1], dtype='int64')
tm.assert_series_equal(o.value_counts(dropna=False), expected_s_na)
tm.assert_series_equal(o.value_counts(), expected_s)
# numpy_array_equal cannot compare arrays that include nan
result = o.unique()
self.assert_numpy_array_equal(result[1:], values[2:])
if isinstance(o, DatetimeIndex):
self.assertTrue(result[0] is pd.NaT)
else:
self.assertTrue(pd.isnull(result[0]))
self.assertEqual(o.nunique(), 8)
self.assertEqual(o.nunique(dropna=False), 9)
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.unique(s_values))
self.assertEqual(s.nunique(), 4)
# don't sort, have to sort after the fact as not sorting is platform-dep
hist = s.value_counts(sort=False)
hist.sort()
expected = Series([3, 1, 4, 2], index=list('acbd'))
expected.sort()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
# bins
self.assertRaises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({0.998: 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({0.998: 1.0})
tm.assert_series_equal(res1n, exp1n)
self.assert_numpy_array_equal(s1.unique(), np.array([1, 2, 3]))
self.assertEqual(s1.nunique(), 3)
res4 = s1.value_counts(bins=4)
exp4 = Series({0.998: 2, 1.5: 1, 2.0: 0, 2.5: 1}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series({0.998: 0.5, 1.5: 0.25, 2.0: 0.0, 2.5: 0.25}, index=[0.998, 2.5, 1.5, 2.0])
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan, 'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array(['a', 'b', np.nan, 'd'], dtype='O'))
self.assertEqual(s.nunique(), 3)
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected)
self.assert_numpy_array_equal(s.unique(), np.array([]))
self.assertEqual(s.nunique(), 0)
# GH 3002, datetime64[ns]
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM', 'xxyyzz20100101EGG',
'xxyyww20090101EGG', 'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3], names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
idx = pd.to_datetime(['2010-01-01 00:00:00Z', '2008-09-09 00:00:00Z', '2009-01-01 00:00:00X'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np.array(['2010-01-01 00:00:00Z', '2009-01-01 00:00:00Z', '2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, DatetimeIndex):
expected = DatetimeIndex(expected)
self.assertTrue(s.unique().equals(expected))
else:
self.assert_numpy_array_equal(s.unique(), expected)
self.assertEqual(s.nunique(), 3)
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
self.assertEqual(result.index.dtype, 'datetime64[ns]')
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
self.assertEqual(unique.dtype, 'datetime64[ns]')
# numpy_array_equal cannot compare pd.NaT
self.assert_numpy_array_equal(unique[:3], expected)
self.assertTrue(unique[3] is pd.NaT or unique[3].astype('int64') == pd.tslib.iNaT)
self.assertEqual(s.nunique(), 3)
self.assertEqual(s.nunique(dropna=False), 4)
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td)
result = td.value_counts()
expected_s = Series([6], index=[86400000000000])
self.assertEqual(result.index.dtype, 'int64')
tm.assert_series_equal(result, expected_s)
# get nanoseconds to compare
expected = np.array([86400000000000])
self.assert_numpy_array_equal(td.unique(), expected)
self.assertEqual(td.nunique(), 1)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2)
result2 = td2.value_counts()
self.assertEqual(result2.index.dtype, 'int64')
tm.assert_series_equal(result2, expected_s)
self.assert_numpy_array_equal(td.unique(), expected)
self.assertEqual(td.nunique(), 1)
def test_factorize(self):
for o in self.objs:
exp_arr = np.array(range(len(o)))
labels, uniques = o.factorize()
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
for o in self.objs:
# sort by value, and create duplicates
if isinstance(o, Series):
o.sort()
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
labels, uniques = n.factorize(sort=True)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.values)
self.assert_numpy_array_equal(uniques, expected)
else:
self.assertTrue(uniques.equals(o))
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4])
labels, uniques = n.factorize(sort=False)
self.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(np.concatenate([o.values[5:10], o.values[:5]]))
self.assert_numpy_array_equal(uniques, expected)
else:
expected = o[5:].append(o[:5])
self.assertTrue(uniques.equals(expected))
class TestDatetimeIndexOps(Ops):
_allowed = '_allow_datetime_index_ops'
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEquals(s.year,2000)
self.assertEquals(s.month,1)
self.assertEquals(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
pd.NaT, pd.Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
# monotonic
idx1 = pd.DatetimeIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """<class 'pandas.tseries.index.DatetimeIndex'>
Length: 0, Freq: D, Timezone: None"""
exp2 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01]
Length: 1, Freq: D, Timezone: None"""
exp3 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, 2011-01-02]
Length: 2, Freq: D, Timezone: None"""
exp4 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01, ..., 2011-01-03]
Length: 3, Freq: D, Timezone: None"""
exp5 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00+09:00, ..., 2011-01-01 11:00:00+09:00]
Length: 3, Freq: H, Timezone: Asia/Tokyo"""
exp6 = """<class 'pandas.tseries.index.DatetimeIndex'>
[2011-01-01 09:00:00-05:00, ..., NaT]
Length: 3, Freq: None, Timezone: US/Eastern"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
class TestPeriodIndexOps(Ops):
_allowed = '_allow_period_index_ops'
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: x._allow_datetime_index_ops or x._allow_period_index_ops
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
for i in [0, 1, 3]:
self.assertTrue(result[i], expected[i])
self.assertTrue(result[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result[2].freq, 'D')
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertTrue(result_list[i], expected_list[i])
self.assertTrue(result_list[2].ordinal, pd.tslib.iNaT)
self.assertTrue(result_list[2].freq, 'D')
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = | PeriodIndex([pd.NaT], freq='M') | pandas.PeriodIndex |
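# The value_counts fixtures above are built with np.repeat(values, range(1, len(o) + 1)),
# i.e. the i-th element of the index appears i + 1 times, which is why the expected
# counts run 10, 9, ..., 1 for a length-10 object. A tiny self-contained illustration
# (the letters below are invented, not part of the test data):
import numpy as np
np.repeat(['a', 'b', 'c'], range(1, 4))
# -> array(['a', 'b', 'b', 'c', 'c', 'c'], dtype='<U1')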
import pandas as pd
import numpy as np
from pandas.tseries.holiday import USFederalHolidayCalendar
from oolearning.transformers.TransformerBase import TransformerBase
class EncodeDateColumnsTransformer(TransformerBase):
"""
Replaces each date column with numeric/boolean columns that represent things such as:
year, month, day, hour, min, second, ..., is_weekday, is_weekend
"""
def __init__(self, include_columns: list=None):
"""
:param include_columns: list of encoded date columns to use (i.e. a subset of `encoded_columns()`)
"""
super().__init__()
if include_columns is not None:
assert isinstance(include_columns, list)
assert len(set(include_columns).difference(EncodeDateColumnsTransformer.encoded_columns())) == 0
self._include_columns = include_columns
@staticmethod
def encoded_columns():
return [
'year',
'month',
'day',
'hour',
'minute',
'second',
'quarter',
'week',
'days_in_month',
'day_of_year',
'day_of_week',
'is_leap_year',
'is_month_end',
'is_month_start',
'is_quarter_end',
'is_quarter_start',
'is_year_end',
'is_year_start',
'is_us_federal_holiday',
'is_weekday',
'is_weekend',
]
def _fit_definition(self, data_x: pd.DataFrame) -> dict:
# no state to capture
return {}
@staticmethod
def _get_date_columns(dataframe: pd.DataFrame):
return dataframe.select_dtypes(include=[np.datetime64]).columns.values
def _transform_definition(self, data_x: pd.DataFrame, state: dict) -> pd.DataFrame:
date_columns = self._get_date_columns(dataframe=data_x)
# no date_columns, nothing to modify
if len(date_columns) == 0:
return data_x
# find the min/max dates used in the entire dataset; used to create the holiday calendar
min_date = None
max_date = None
for column in date_columns:
temp_min = data_x[column].min()
if min_date is None or temp_min < min_date:
min_date = temp_min
temp_max = data_x[column].max()
if max_date is None or temp_max > max_date:
max_date = temp_max
calendar = | USFederalHolidayCalendar() | pandas.tseries.holiday.USFederalHolidayCalendar |
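# A minimal standalone sketch of the date-feature idea implemented by
# EncodeDateColumnsTransformer above, assuming a DataFrame with a single datetime64
# column named 'ts' (the column names here are illustrative, not part of the class):
import pandas as pd
df_demo = pd.DataFrame({'ts': pd.date_range('2021-01-01', periods=3, freq='D')})
encoded_demo = pd.DataFrame({
    'ts_year': df_demo['ts'].dt.year,
    'ts_month': df_demo['ts'].dt.month,
    'ts_day_of_week': df_demo['ts'].dt.dayofweek,  # Monday=0 ... Sunday=6
    'ts_is_weekend': df_demo['ts'].dt.dayofweek >= 5,
    'ts_is_weekday': df_demo['ts'].dt.dayofweek < 5,
})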
from .branches import MuType
import numpy as np
import pandas as pd
import os
from functools import reduce
from itertools import combinations as combn
from itertools import product
from operator import or_
from re import sub as gsub
from copy import deepcopy
from math import log10, floor
from sklearn.cluster import MeanShift
class MuTree(object):
"""A hierarchy of samples organized by mutation annotation levels.
A MuTree stores variant mutant data for a set of samples in a tree-like
data structure. Each level in the tree corresponds to a particular
mutation annotation hierarchy level, such as Gene, Form, Exon, Protein,
etc. Each node in the tree corresponds to a particular value of the
annotation level present in at least one of the samples stored in the
tree, thus representing a mutation sub-type such as 'TP53' for the Gene
level, 'Missense_Mutation' for the Form level, 'R34K' for the Protein
level, and so on.
A node N* at the ith level of the tree has children nodes for each of
the mutation types present at the (i+1)th annotation level for the samples
also having mutations of type represented by N*. Thus in a tree
containing the levels Gene, Form, and Exon, a node representing the ACT1
gene will have a child representing missense mutations of ACT1, but only
if at least one of the samples in the tree has this type of missense
mutations. Similarly, this ACT1 - missense node may have children
corresponding further sub-types of this mutation located on the 3rd, 5th,
or 8th exon of ACT1.
Every node in a MuTree is also a MuTree, except for the leaf nodes, which
are frozensets of the samples that have the mutation sub-type defined by all of the
annotation level values of the parent nodes. Thus in the above example,
the node representing the missense mutations of the ACT1 gene located on
its 5th exon would simply be the samples with this mutation sub-type,
since 'Exon' is the final annotation level contained in this MuTree.
Levels can either be fields in the 'muts' DataFrame, in which case the
tree will have a branch for each unique value in the field, or one of the
keys of the MuTree.mut_fields object, in which case they will be defined
by the corresponding MuType.muts_<level> method.
Args:
muts (:obj:`pd.DataFrame`, shape = [n_muts, n_annot_fields]
Input mutation data, each record is a mutation occurring in
a sample to be included in the tree.
Must contain a 'Sample' column.
levels (:obj:`tuple` of :obj:`str`)
A list of mutation annotation levels to be included in the tree.
Attributes:
depth (int): How many mutation levels are above the tree
in the hierarchy.
mut_level (str): The mutation annotation level described by the top
level of the tree.
Examples:
>>> mut_data = pd.DataFrame(
>>> {'Sample': ['S1', 'S2', 'S3', 'S4'],
>>> 'Gene': ['TP53', 'TP53', 'KRAS', 'TP53'],
>>> 'Exon': ['3', '3', '2', '7'],
>>> 'Protein': ['H3R', 'S7T', 'E1R', 'Y11R']}
>>> )
>>> mtree = MuTree(mut_data, levels=['Gene', 'Exon', 'Protein'])
>>> print(mtree)
Gene IS TP53 AND
Exon is 3 AND
Protein is H3R: S1
Protein is S7T: S2
Exon is 7 AND
Protein is Y11R: S4
Gene is KRAS AND
Exon is 2 AND
Protein is E1R: S3
"""
# mapping between fields in an input mutation table and
# custom mutation levels
mut_fields = {
'Type': ('Consequence', 'Protein'),
'Location': ('Protein', ),
}
@classmethod
def split_muts(cls, muts, lvl_name, **kwargs):
"""Splits mutations into tree branches for a given level.
Args:
muts (pd.DataFrame), shape = [n_muts, n_annot_fields]
A list of mutations to be split according to the given
annotation level, where each row corresponds to a mutation
in a particular sample. Must contain the annotation fields
needed by the given level.
lvl_name (str)
An annotation level, must be either a column in the mutation
dataframe, a parsed variation thereof, or a custom annotation
level listed in `MuTree.mut_fields`.
Returns:
split_muts (:obj:`dict` of :obj:`pd.DataFrame`)
"""
# level names have to consist of a base level name and an optional
# parsing label separated by an underscore
lvl_info = lvl_name.split('_')
if len(lvl_info) > 2:
raise ValueError(
"Invalid level name {} with more than two fields!".format(
lvl_name)
)
# if a parsing label is present, add the parsed level
# to the table of mutations
elif len(lvl_info) == 2:
parse_lbl = lvl_info[1].lower()
parse_fx = 'parse_{}'.format(parse_lbl)
if lvl_info[0] not in muts:
lvl_func = 'cls.muts_{}'.format(lvl_info[0].lower())
muts_dict = eval(lvl_func)(muts, **kwargs)
muts = pd.DataFrame([])
for lvl, muts_df in muts_dict.items():
muts = pd.concat([
muts, muts_df.join(pd.Series(lvl, index=muts_df.index,
name=lvl_info[0]))
])
if parse_fx in cls.__dict__:
muts = eval('cls.{}'.format(parse_fx))(muts, lvl_info[0])
else:
raise ValueError(
"Custom parse label {} must have a corresponding <{}> "
"method defined in the MuTree class!".format(
parse_lbl, parse_fx)
)
# splits mutations according to values of the specified level
if isinstance(muts, tuple):
if np.all( | pd.isnull(val) | pandas.isnull |
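# A minimal sketch of the "split mutations by an annotation level" idea that
# split_muts handles above, for the simple case where the level is a plain column
# of the mutation table (sample and gene names below are invented for illustration):
import pandas as pd
muts_demo = pd.DataFrame({'Sample': ['S1', 'S2', 'S3'],
                          'Gene': ['TP53', 'TP53', 'KRAS'],
                          'Exon': ['3', '3', '2']})
split_demo = {lvl: grp for lvl, grp in muts_demo.groupby('Gene')}  # one sub-table per Gene value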
"""
Tests for zipline/utils/pandas_utils.py
"""
from unittest import skipIf
import pandas as pd
from zipline.testing import parameter_space, ZiplineTestCase
from zipline.testing.predicates import assert_equal
from zipline.utils.pandas_utils import (
categorical_df_concat,
nearest_unequal_elements,
new_pandas,
skip_pipeline_new_pandas,
)
class TestNearestUnequalElements(ZiplineTestCase):
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements(self, tz):
dts = pd.to_datetime(
['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09'],
).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-30', None, '2014-01-01'),
('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, '2014-01-05'),
('2014-01-02', '2014-01-01', '2014-01-05'),
('2014-01-03', '2014-01-01', '2014-01-05'),
('2014-01-04', '2014-01-01', '2014-01-05'),
('2014-01-05', '2014-01-01', '2014-01-06'),
('2014-01-06', '2014-01-05', '2014-01-09'),
('2014-01-07', '2014-01-06', '2014-01-09'),
('2014-01-08', '2014-01-06', '2014-01-09'),
('2014-01-09', '2014-01-06', None),
('2014-01-10', '2014-01-09', None),
('2014-01-11', '2014-01-09', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
@parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
def test_nearest_unequal_elements_short_dts(self, tz):
# Length 1.
dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)
def t(s):
return None if s is None else pd.Timestamp(s, tz=tz)
for dt, before, after in (('2013-12-31', None, '2014-01-01'),
('2014-01-01', None, None),
('2014-01-02', '2014-01-01', None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
# Length 0
dts = pd.to_datetime([]).tz_localize(tz)
for dt, before, after in (('2013-12-31', None, None),
('2014-01-01', None, None),
('2014-01-02', None, None)):
computed = nearest_unequal_elements(dts, t(dt))
expected = (t(before), t(after))
self.assertEqual(computed, expected)
def test_nearest_unequal_bad_input(self):
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2014']),
pd.Timestamp('2014'),
)
self.assertEqual(str(e.exception), 'dts must be unique')
with self.assertRaises(ValueError) as e:
nearest_unequal_elements(
pd.to_datetime(['2014', '2013']),
pd.Timestamp('2014'),
)
self.assertEqual(
str(e.exception),
'dts must be sorted in increasing order',
)
class TestCatDFConcat(ZiplineTestCase):
@skipIf(new_pandas, skip_pipeline_new_pandas)
def test_categorical_df_concat(self):
inp = [
pd.DataFrame(
{
'A': | pd.Series(['a', 'b', 'c'], dtype='category') | pandas.Series |
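# The case table above fixes the contract of nearest_unequal_elements: given a
# sorted, unique DatetimeIndex it returns the nearest element strictly before and
# strictly after the query timestamp (None where no such element exists). A rough
# sketch of that contract via searchsorted (an illustration only, not zipline's
# actual implementation):
import pandas as pd
def nearest_unequal_sketch(dts, dt):
    left = int(dts.searchsorted(dt, side='left'))
    right = int(dts.searchsorted(dt, side='right'))
    before = dts[left - 1] if left > 0 else None
    after = dts[right] if right < len(dts) else None
    return before, after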
# -*- coding: utf-8 -*-
"""
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
import pytest
from pandas.compat import PY3, StringIO, u
from pandas.errors import ParserError
from pandas import DataFrame
import pandas.util.testing as tm
class QuotingTests(object):
def test_bad_quote_char(self):
data = '1,2,3'
# Python 2.x: "...must be an 1-character..."
# Python 3.x: "...must be a 1-character..."
msg = '"quotechar" must be a(n)? 1-character string'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='foo')
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
msg = '"quotechar" must be string, not int'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=2)
def test_bad_quoting(self):
data = '1,2,3'
msg = '"quoting" must be an integer'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting='foo')
# quoting must be in the range [0, 3]
msg = 'bad "quoting" value'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quoting=5)
def test_quote_char_basic(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
def test_quote_char_various(self):
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, 'cat']],
columns=['a', 'b', 'c'])
quote_chars = ['~', '*', '%', '$', '@', 'P']
for quote_char in quote_chars:
new_data = data.replace('"', quote_char)
result = self.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
def test_null_quote_char(self):
data = 'a,b,c\n1,2,3'
# sanity checks
msg = 'quotechar must be set if quoting enabled'
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_MINIMAL)
with pytest.raises(TypeError, match=msg):
self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_MINIMAL)
# no errors should be raised if quoting is None
expected = DataFrame([[1, 2, 3]],
columns=['a', 'b', 'c'])
result = self.read_csv(StringIO(data), quotechar=None,
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='',
quoting=csv.QUOTE_NONE)
tm.assert_frame_equal(result, expected)
def test_quoting_various(self):
data = '1,2,"foo"'
cols = ['a', 'b', 'c']
# QUOTE_MINIMAL and QUOTE_ALL apply only to
# the CSV writer, so they should have no
# special effect for the CSV reader
expected = DataFrame([[1, 2, 'foo']], columns=cols)
# test default (afterwards, arguments are all explicit)
result = self.read_csv(StringIO(data), names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_MINIMAL, names=cols)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_ALL, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone
expected = DataFrame([[1, 2, '"foo"']], columns=cols)
result = self.read_csv(StringIO(data), quotechar='"',
quoting=csv.QUOTE_NONE, names=cols)
tm.assert_frame_equal(result, expected)
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
expected = DataFrame([[1.0, 2.0, 'foo']], columns=cols)
result = self.read_csv( | StringIO(data) | pandas.compat.StringIO |
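# A quick illustration of the quoting semantics exercised above, calling
# pandas.read_csv directly instead of the suite's self.read_csv wrapper: the default
# QUOTE_MINIMAL strips the quotes from parsed values, while QUOTE_NONE leaves them in.
import csv
from io import StringIO
import pandas as pd
data_demo = 'a,b,c\n1,2,"cat"'
stripped = pd.read_csv(StringIO(data_demo))                      # c column holds 'cat'
kept = pd.read_csv(StringIO(data_demo), quoting=csv.QUOTE_NONE)  # c column holds '"cat"'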
from functools import reduce
from config import PERIODO_INI, PERIODO_FIN
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None
def check_periods(col):
print(pd.DataFrame(
{"Rango": [col.min(), col.max()]},
index=['MIN', 'MAX'])
)
# HELPER FUNCTIONS
def transform_date(s):
dates = {date: pd.to_datetime(date) for date in s.unique()}
return s.map(dates)
def dates_to_int(dates):
periodos = {fecha: i + 1
for i, fecha
in enumerate(sorted(dates.unique(),
reverse=True))
}
return dates.map(periodos)
def simplify_history(x):
return "".join(["1" if int(n) > 0 else "0" for n in x.split(" ")])
def to_yearmonth(s):
dates = {date: pd.Timestamp(date).strftime('%Y-%m') for date in s.unique()}
return s.map(dates)
# TRANSFORMING PIPELINE FUNCTIONS
def transform_polizas(df_polizas):
df_polizas['FECHA_VIG_POL'] = transform_date(df_polizas['FECHA_VIG_POL'])
df_polizas['mes_anio_vig'] = df_polizas['FECHA_VIG_POL'].dt.strftime('%Y-%m')
to_pivot = df_polizas[["CIF_ID",
"NUM_SECU_POL",
"MCA_VIGENCIA",
"mes_anio_vig"]].drop_duplicates()
del df_polizas
df_polizas_pivoted = to_pivot.pivot_table(index='CIF_ID',
columns=['mes_anio_vig'],
values=['MCA_VIGENCIA'],
aggfunc='count',
fill_value=0)
del to_pivot
df_polizas_pivoted = df_polizas_pivoted.astype(str)
df_polizas_pivoted["history"] = df_polizas_pivoted.apply(" ".join, axis=1)
new_df = | pd.DataFrame(df_polizas_pivoted.index) | pandas.DataFrame |
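# Small usage sketch of the helpers defined above (values invented):
# simplify_history collapses a space-separated string of monthly counts into a
# binary presence string, and dates_to_int ranks each unique date from newest (1) upward.
import pandas as pd
simplify_history("0 2 0 1")                                    # -> "0101"
fechas = pd.Series(pd.to_datetime(["2020-01-01", "2020-03-01", "2020-02-01"]))
dates_to_int(fechas).tolist()                                  # -> [3, 1, 2]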
import os
import json
import pandas as pd
import numpy as np
from collections import namedtuple
import pytest
import sklearn.datasets as datasets
import sklearn.neighbors as knn
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
import mlflow.sklearn
from mlflow.protos.databricks_pb2 import ErrorCode, MALFORMED_REQUEST, BAD_REQUEST
from tests.helper_functions import pyfunc_serve_and_score_model, random_int, random_str
ModelWithData = namedtuple("ModelWithData", ["model", "inference_data"])
@pytest.fixture(scope="session")
def sklearn_model():
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
knn_model = knn.KNeighborsClassifier()
knn_model.fit(X, y)
return ModelWithData(model=knn_model, inference_data=X)
@pytest.fixture
def model_path(tmpdir):
return str(os.path.join(tmpdir.strpath, "model"))
@pytest.mark.large
def test_scoring_server_responds_to_invalid_json_input_with_stacktrace_and_error_code(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
incorrect_json_content = json.dumps({"not": "a serialized dataframe"})
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=incorrect_json_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
@pytest.mark.large
def test_scoring_server_responds_to_malformed_json_input_with_stacktrace_and_error_code(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
malformed_json_content = "this is,,,, not valid json"
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=malformed_json_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
@pytest.mark.large
def test_scoring_server_responds_to_invalid_pandas_input_format_with_stacktrace_and_error_code(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
# The pyfunc scoring server expects a serialized Pandas Dataframe in `split` or `records`
# format; passing a serialized Dataframe in `table` format should yield a readable error
pandas_table_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="table")
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=pandas_table_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
@pytest.mark.large
def test_scoring_server_responds_to_incompatible_inference_dataframe_with_stacktrace_and_error_code(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
incompatible_df = pd.DataFrame(np.array(range(10)))
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=incompatible_df,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(BAD_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
@pytest.mark.large
def test_scoring_server_responds_to_invalid_csv_input_with_stacktrace_and_error_code(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
# Any empty string is not valid pandas CSV
incorrect_csv_content = ""
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=incorrect_csv_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_CSV)
response_json = json.loads(response.content)
assert "error_code" in response_json
assert response_json["error_code"] == ErrorCode.Name(MALFORMED_REQUEST)
assert "message" in response_json
assert "stack_trace" in response_json
@pytest.mark.large
def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_records_orientation(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_record_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="records")
response_records_content_type = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=pandas_record_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED)
assert response_records_content_type.status_code == 200
@pytest.mark.large
def test_scoring_server_successfully_evaluates_correct_dataframes_with_pandas_split_orientation(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
response_default_content_type = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=pandas_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON)
assert response_default_content_type.status_code == 200
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=pandas_split_content,
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED)
assert response.status_code == 200
@pytest.mark.large
def test_scoring_server_responds_to_invalid_content_type_request_with_unsupported_content_type_code(
sklearn_model, model_path):
mlflow.sklearn.save_model(sk_model=sklearn_model.model, path=model_path)
pandas_split_content = pd.DataFrame(sklearn_model.inference_data).to_json(orient="split")
response = pyfunc_serve_and_score_model(
model_path=os.path.abspath(model_path),
data=pandas_split_content,
content_type="not_a_supported_content_type")
assert response.status_code == 415
@pytest.mark.large
def test_parse_json_input_records_oriented():
size = 20
data = {"col_m": [random_int(0, 1000) for _ in range(size)],
"col_z": [random_str(4) for _ in range(size)],
"col_a": [random_int() for _ in range(size)]}
p1 = pd.DataFrame.from_dict(data)
p2 = pyfunc_scoring_server.parse_json_input(p1.to_json(orient="records"), orient="records")
# "records" orient may shuffle column ordering. Hence comparing each column Series
for col in data.keys():
assert all(p1[col] == p2[col])
@pytest.mark.large
def test_parse_json_input_split_oriented():
size = 200
data = {"col_m": [random_int(0, 1000) for _ in range(size)],
"col_z": [random_str(4) for _ in range(size)],
"col_a": [random_int() for _ in range(size)]}
p1 = | pd.DataFrame.from_dict(data) | pandas.DataFrame.from_dict |
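# The tests above rely on pandas' JSON orientations: "split" serialises columns,
# index and data separately (order-preserving), while "records" emits one object per
# row and may reorder columns on round-trip, hence the per-column comparison in
# test_parse_json_input_records_oriented. A tiny illustration:
import pandas as pd
df_demo = pd.DataFrame({'x': [1, 2], 'y': ['a', 'b']})
split_json = df_demo.to_json(orient='split')      # {"columns":[...],"index":[...],"data":[...]}
records_json = df_demo.to_json(orient='records')  # [{"x":1,"y":"a"},{"x":2,"y":"b"}]
roundtrip = pd.read_json(split_json, orient='split')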
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 07:06:50 2021
@author: nmei
"""
import os
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_style('white')
sns.set_context('paper',font_scale = 2)
from matplotlib import rc
from matplotlib import pyplot as plt
from matplotlib.patches import Patch
# rc('font',weight = 'bold')
# plt.rcParams['axes.labelsize'] = 45
# plt.rcParams['axes.labelweight'] = 'bold'
# plt.rcParams['axes.titlesize'] = 45
# plt.rcParams['axes.titleweight'] = 'bold'
# plt.rcParams['ytick.labelsize'] = 32
# plt.rcParams['xtick.labelsize'] = 32
working_dir = '../results/first_layer_only'
figure_dir = '../figures'
collect_dir = '/export/home/nmei/nmei/properties_of_unconscious_processing/all_figures'
paper_dir = '/export/home/nmei/nmei/properties_of_unconscious_processing/figures'
marker_factor = 10
marker_type = ['8','s','p','*','+','D','o']
alpha_level = .75
model_names = ['AlexNet','VGG19','MobileNetV2','DenseNet169','ResNet50']
dict_folder = dict(ResNet50 = 'resnet50',
VGG19 = 'vgg19_bn',
AlexNet = 'alexnet',
MobileNetV2 = 'mobilenet',
DenseNet169 = 'densenet169')
n_noise_levels = 50
noise_levels = np.concatenate([[0],[item for item in np.logspace(-1,3,n_noise_levels)]])
x_map = {round(item,9):ii for ii,item in enumerate(noise_levels)}
inverse_x_map = {round(value,9):key for key,value in x_map.items()}
dfs = []
df_chance = []
for model_name,folder_name in dict_folder.items():
df = pd.read_csv(os.path.join(working_dir,folder_name,'decodings.csv'))
df['x'] = df['noise_level'].round(9).map(x_map)
df['x_id'] = df['noise_level'].round(9).map(x_map)
df['x'] = df['x'].apply(lambda x: [x + np.random.normal(0,0.1,size = 1)][0][0])
df_chance.append(df)
df_plot = pd.melt(df,id_vars = ['model_name',
'noise_level',
'x',],
value_vars = ['cnn_score',
'first_score_mean',],
value_name = 'ROC AUC')
temp = pd.melt(df,id_vars = ['model_name',
'noise_level',
'x',],
value_vars = ['cnn_pval',
'svm_first_pval',])
df_plot['model_name'] = model_name
df_plot['pvals'] = temp['value'].values.copy()
df_plot['Type'] = df_plot['variable'].apply(lambda x: x.split('_')[0].upper())
df_plot['Type'] = df_plot['Type'].map({'CNN':'CNN',
'FIRST':'Decode first layer'})
dfs.append(df_plot)
df_plot = pd.concat(dfs)
df_chance = | pd.concat(df_chance) | pandas.concat |
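# pd.melt as used above reshapes each model's decoding table from wide form (one
# column per score) to long form with one row per (noise level, score type).
# A minimal illustration with invented column values:
import pandas as pd
wide = pd.DataFrame({'noise_level': [0.1, 1.0],
                     'cnn_score': [0.9, 0.7],
                     'first_score_mean': [0.8, 0.6]})
long_form = pd.melt(wide, id_vars=['noise_level'],
                    value_vars=['cnn_score', 'first_score_mean'],
                    value_name='ROC AUC')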
import json
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from tqdm import tqdm
from utils.utils import normalize_www_prefix
NAN_VALUE = -1
def read_csv(path: str) -> pd.DataFrame:
"""Opens the csv dataset as DataFrame and cast types.
"""
date_parser = lambda c: | pd.to_datetime(c, format='%Y-%m-%dT%H:%M:%SZ', errors='coerce') | pandas.to_datetime |
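# The errors='coerce' flag used in the date_parser above turns unparseable values
# into NaT instead of raising, so malformed timestamps survive loading as missing
# values. For example (values invented):
import pandas as pd
pd.to_datetime(pd.Series(['2020-01-02T03:04:05Z', 'not a date']),
               format='%Y-%m-%dT%H:%M:%SZ', errors='coerce')
# -> [2020-01-02 03:04:05, NaT]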
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import OrderedDict
import numpy as np
import pandas as pd
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
import mars.dataframe as md
from mars.operands import ShuffleProxy
from mars.tests.core import TestBase, ExecutorForTest, assert_groupby_equal
from mars.utils import arrow_array_to_objects
class Test(TestBase):
def setUp(self):
super().setUp()
self.executor = ExecutorForTest('numpy')
self.ctx, self.executor = self._create_test_context(self.executor)
self.ctx.__enter__()
def tearDown(self) -> None:
self.ctx.__exit__(None, None, None)
def testGroupBy(self):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')})
mdf = md.DataFrame(df1, chunk_size=3)
grouped = mdf.groupby('b')
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
df1.groupby('b'))
df2 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')},
index=['i' + str(i) for i in range(9)])
mdf = md.DataFrame(df2, chunk_size=3)
grouped = mdf.groupby('b')
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
df2.groupby('b'))
# test groupby series
grouped = mdf.groupby(mdf['b'])
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
df2.groupby(df2['b']))
# test groupby multiple series
grouped = mdf.groupby(by=[mdf['b'], mdf['c']])
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
df2.groupby(by=[df2['b'], df2['c']]))
df3 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')},
index=pd.MultiIndex.from_tuples([(i % 3, 'i' + str(i)) for i in range(9)]))
mdf = md.DataFrame(df3, chunk_size=3)
grouped = mdf.groupby(level=0)
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
df3.groupby(level=0))
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
grouped = ms1.groupby(lambda x: x % 3)
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
series1.groupby(lambda x: x % 3))
# test groupby series
grouped = ms1.groupby(ms1)
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
series1.groupby(series1))
series2 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3],
index=['i' + str(i) for i in range(9)])
ms2 = md.Series(series2, chunk_size=3)
grouped = ms2.groupby(lambda x: int(x[1:]) % 3)
assert_groupby_equal(self.executor.execute_dataframe(grouped, concat=True)[0],
series2.groupby(lambda x: int(x[1:]) % 3))
def testGroupByGetItem(self):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')},
index=pd.MultiIndex.from_tuples([(i % 3, 'i' + str(i)) for i in range(9)]))
mdf = md.DataFrame(df1, chunk_size=3)
r = mdf.groupby(level=0)[['a', 'b']]
assert_groupby_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby(level=0)[['a', 'b']], with_selection=True)
r = mdf.groupby(level=0)[['a', 'b']].sum(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby(level=0)[['a', 'b']].sum())
r = mdf.groupby(level=0)[['a', 'b']].apply(lambda x: x + 1)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby(level=0)[['a', 'b']].apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']]
assert_groupby_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b')[['a', 'b']], with_selection=True)
r = mdf.groupby('b')[['a', 'c']]
assert_groupby_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b')[['a', 'c']], with_selection=True)
r = mdf.groupby('b')[['a', 'b']].sum(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b')[['a', 'b']].sum())
r = mdf.groupby('b')[['a', 'b']].agg(['sum', 'count'], method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b')[['a', 'b']].agg(['sum', 'count']))
r = mdf.groupby('b')[['a', 'c']].agg(['sum', 'count'], method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b')[['a', 'c']].agg(['sum', 'count']))
r = mdf.groupby('b')[['a', 'b']].apply(lambda x: x + 1)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby('b')[['a', 'b']].apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']].transform(lambda x: x + 1)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby('b')[['a', 'b']].transform(lambda x: x + 1).sort_index())
r = mdf.groupby('b')[['a', 'b']].cumsum()
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby('b')[['a', 'b']].cumsum().sort_index())
r = mdf.groupby('b').a
assert_groupby_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b').a, with_selection=True)
r = mdf.groupby('b').a.sum(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b').a.sum())
r = mdf.groupby('b').a.agg(['sum', 'mean', 'var'], method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r, concat=True)[0],
df1.groupby('b').a.agg(['sum', 'mean', 'var']))
r = mdf.groupby('b').a.apply(lambda x: x + 1)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby('b').a.apply(lambda x: x + 1).sort_index())
r = mdf.groupby('b').a.transform(lambda x: x + 1)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby('b').a.transform(lambda x: x + 1).sort_index())
r = mdf.groupby('b').a.cumsum()
pd.testing.assert_series_equal(self.executor.execute_dataframe(r, concat=True)[0].sort_index(),
df1.groupby('b').a.cumsum().sort_index())
def testDataFrameGroupByAgg(self):
rs = np.random.RandomState(0)
df1 = pd.DataFrame({'a': rs.choice([2, 3, 4], size=(100,)),
'b': rs.choice([2, 3, 4], size=(100,))})
mdf = md.DataFrame(df1, chunk_size=3)
df2 = pd.DataFrame({'c1': np.arange(10).astype(np.int64),
'c2': rs.choice(['a', 'b', 'c'], (10,)),
'c3': rs.rand(10)})
mdf2 = md.DataFrame(df2, chunk_size=2)
for method in ['tree', 'shuffle']:
r0 = mdf2.groupby('c2').agg('size', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r0, concat=True)[0],
df2.groupby('c2').agg('size'))
r1 = mdf.groupby('a').agg('sum', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r1, concat=True)[0],
df1.groupby('a').agg('sum'))
r2 = mdf.groupby('b').agg('min', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r2, concat=True)[0],
df1.groupby('b').agg('min'))
r1 = mdf2.groupby('c2').agg('prod', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r1, concat=True)[0],
df2.groupby('c2').agg('prod'))
r2 = mdf2.groupby('c2').agg('max', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r2, concat=True)[0],
df2.groupby('c2').agg('max'))
r3 = mdf2.groupby('c2').agg('count', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
df2.groupby('c2').agg('count'))
r4 = mdf2.groupby('c2').agg('mean', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r4, concat=True)[0],
df2.groupby('c2').agg('mean'))
r5 = mdf2.groupby('c2').agg('var', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r5, concat=True)[0],
df2.groupby('c2').agg('var'))
r6 = mdf2.groupby('c2').agg('std', method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r6, concat=True)[0],
df2.groupby('c2').agg('std'))
agg = ['std', 'mean', 'var', 'max', 'count', 'size', 'all', 'any']
r3 = mdf2.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
df2.groupby('c2').agg(agg))
agg = OrderedDict([('c1', ['min', 'mean']), ('c3', 'std')])
r3 = mdf2.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
df2.groupby('c2').agg(agg))
agg = OrderedDict([('c1', 'min'), ('c3', 'sum')])
r3 = mdf2.groupby('c2').agg(agg, method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
df2.groupby('c2').agg(agg))
r3 = mdf2.groupby('c2').agg({'c1': 'min'}, method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
df2.groupby('c2').agg({'c1': 'min'}))
# test groupby series
r3 = mdf2.groupby(mdf2['c2']).sum(method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
df2.groupby(df2['c2']).sum())
r8 = mdf2.groupby('c2').size(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r8, concat=True)[0],
df2.groupby('c2').size())
r4 = mdf2.groupby('c2').sum(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r4, concat=True)[0],
df2.groupby('c2').sum())
r5 = mdf2.groupby('c2').prod(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r5, concat=True)[0],
df2.groupby('c2').prod())
r6 = mdf2.groupby('c2').min(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r6, concat=True)[0],
df2.groupby('c2').min())
r7 = mdf2.groupby('c2').max(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r7, concat=True)[0],
df2.groupby('c2').max())
r8 = mdf2.groupby('c2').count(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r8, concat=True)[0],
df2.groupby('c2').count())
r9 = mdf2.groupby('c2').mean(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r9, concat=True)[0],
df2.groupby('c2').mean())
r10 = mdf2.groupby('c2').var(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r10, concat=True)[0],
df2.groupby('c2').var())
r11 = mdf2.groupby('c2').std(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r11, concat=True)[0],
df2.groupby('c2').std())
r10 = mdf2.groupby('c2').all(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r10, concat=True)[0],
df2.groupby('c2').all())
r11 = mdf2.groupby('c2').any(method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r11, concat=True)[0],
df2.groupby('c2').any())
# test as_index=False
r12 = mdf2.groupby('c2', as_index=False).agg('mean', method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r12, concat=True)[0],
df2.groupby('c2', as_index=False).agg('mean'))
self.assertFalse(r12.op.groupby_params['as_index'])
# test as_index=False takes no effect
r13 = mdf2.groupby(['c1', 'c2'], as_index=False).agg(['mean', 'count'], method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r13, concat=True)[0],
df2.groupby(['c1', 'c2'], as_index=False).agg(['mean', 'count']))
self.assertTrue(r13.op.groupby_params['as_index'])
r14 = mdf2.groupby('c2').agg(['cumsum', 'cumcount'], method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r14, concat=True)[0].sort_index(),
df2.groupby('c2').agg(['cumsum', 'cumcount']).sort_index())
# test auto method
r15 = mdf2.groupby('c2').agg('prod')
self.assertEqual(r15.op.method, 'auto')
self.assertTrue(all((not isinstance(c.op, ShuffleProxy)) for c in r15.build_graph(tiled=True)))
def testSeriesGroupByAgg(self):
rs = np.random.RandomState(0)
series1 = pd.Series(rs.rand(10))
ms1 = md.Series(series1, chunk_size=3)
for method in ['tree', 'shuffle']:
r0 = ms1.groupby(lambda x: x % 2).agg('size', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r0, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('size'))
r1 = ms1.groupby(lambda x: x % 2).agg('sum', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r1, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('sum'))
r2 = ms1.groupby(lambda x: x % 2).agg('min', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r2, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('min'))
r1 = ms1.groupby(lambda x: x % 2).agg('prod', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r1, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('prod'))
r2 = ms1.groupby(lambda x: x % 2).agg('max', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r2, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('max'))
r3 = ms1.groupby(lambda x: x % 2).agg('count', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r3, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('count'))
r4 = ms1.groupby(lambda x: x % 2).agg('mean', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r4, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('mean'))
r5 = ms1.groupby(lambda x: x % 2).agg('var', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r5, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('var'))
r6 = ms1.groupby(lambda x: x % 2).agg('std', method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r6, concat=True)[0],
series1.groupby(lambda x: x % 2).agg('std'))
agg = ['std', 'mean', 'var', 'max', 'count', 'size', 'all', 'any']
r3 = ms1.groupby(lambda x: x % 2).agg(agg, method=method)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r3, concat=True)[0],
series1.groupby(lambda x: x % 2).agg(agg))
# test groupby series
r3 = ms1.groupby(ms1).sum(method=method)
pd.testing.assert_series_equal(self.executor.execute_dataframe(r3, concat=True)[0],
series1.groupby(series1).sum())
r4 = ms1.groupby(lambda x: x % 2).size(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r4, concat=True)[0],
series1.groupby(lambda x: x % 2).size())
r4 = ms1.groupby(lambda x: x % 2).sum(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r4, concat=True)[0],
series1.groupby(lambda x: x % 2).sum())
r5 = ms1.groupby(lambda x: x % 2).prod(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r5, concat=True)[0],
series1.groupby(lambda x: x % 2).prod())
r6 = ms1.groupby(lambda x: x % 2).min(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r6, concat=True)[0],
series1.groupby(lambda x: x % 2).min())
r7 = ms1.groupby(lambda x: x % 2).max(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r7, concat=True)[0],
series1.groupby(lambda x: x % 2).max())
r8 = ms1.groupby(lambda x: x % 2).count(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r8, concat=True)[0],
series1.groupby(lambda x: x % 2).count())
r9 = ms1.groupby(lambda x: x % 2).mean(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r9, concat=True)[0],
series1.groupby(lambda x: x % 2).mean())
r10 = ms1.groupby(lambda x: x % 2).var(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r10, concat=True)[0],
series1.groupby(lambda x: x % 2).var())
r11 = ms1.groupby(lambda x: x % 2).std(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r11, concat=True)[0],
series1.groupby(lambda x: x % 2).std())
r10 = ms1.groupby(lambda x: x % 2).all(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r10, concat=True)[0],
series1.groupby(lambda x: x % 2).all())
r11 = ms1.groupby(lambda x: x % 2).any(method='tree')
pd.testing.assert_series_equal(self.executor.execute_dataframe(r11, concat=True)[0],
series1.groupby(lambda x: x % 2).any())
r11 = ms1.groupby(lambda x: x % 2).agg(['cumsum', 'cumcount'], method='tree')
pd.testing.assert_frame_equal(self.executor.execute_dataframe(r11, concat=True)[0].sort_index(),
series1.groupby(lambda x: x % 2).agg(['cumsum', 'cumcount']).sort_index())
def testGroupByApply(self):
df1 = pd.DataFrame({'a': [3, 4, 5, 3, 5, 4, 1, 2, 3],
'b': [1, 3, 4, 5, 6, 5, 4, 4, 4],
'c': list('aabaaddce')})
def apply_df(df):
df = df.sort_index()
df.a += df.b
if len(df.index) > 0:
df = df.iloc[:-1, :]
return df
def apply_series(s, truncate=True):
s = s.sort_index()
if truncate and len(s.index) > 0:
s = s.iloc[:-1]
return s
mdf = md.DataFrame(df1, chunk_size=3)
applied = mdf.groupby('b').apply(lambda df: None)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0],
df1.groupby('b').apply(lambda df: None))
applied = mdf.groupby('b').apply(apply_df)
pd.testing.assert_frame_equal(self.executor.execute_dataframe(applied, concat=True)[0].sort_index(),
df1.groupby('b').apply(apply_df).sort_index())
applied = mdf.groupby('b').apply(lambda df: df.a, output_type='series')
pd.testing.assert_series_equal(self.executor.execute_dataframe(applied, concat=True)[0].sort_index(),
df1.groupby('b').apply(lambda df: df.a).sort_index())
applied = mdf.groupby('b').apply(lambda df: df.a.sum())
pd.testing.assert_series_equal(self.executor.execute_dataframe(applied, concat=True)[0].sort_index(),
df1.groupby('b').apply(lambda df: df.a.sum()).sort_index())
series1 = pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3])
ms1 = md.Series(series1, chunk_size=3)
applied = ms1.groupby(lambda x: x % 3).apply(lambda df: None)
pd.testing.assert_series_equal(self.executor.execute_dataframe(applied, concat=True)[0],
series1.groupby(lambda x: x % 3).apply(lambda df: None))
applied = ms1.groupby(lambda x: x % 3).apply(apply_series)
pd.testing.assert_series_equal(self.executor.execute_dataframe(applied, concat=True)[0].sort_index(),
series1.groupby(lambda x: x % 3).apply(apply_series).sort_index())
sindex2 = pd.MultiIndex.from_arrays([list(range(9)), list('ABCDEFGHI')])
series2 = | pd.Series([3, 4, 5, 3, 5, 4, 1, 2, 3], index=sindex2) | pandas.Series |
# importing all the required libraries
import numpy as np
import pandas as pd
from datetime import datetime
import time, datetime
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, LabelEncoder, MinMaxScaler
from chart_studio.plotly import plotly
import plotly.offline as offline
import plotly.graph_objs as go
offline.init_notebook_mode()
from collections import Counter
import pickle
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import math
from tqdm import tqdm
# Reading all the files
air_visit_data = pd.read_csv('air_visit_data.csv')
air_store_info = pd.read_csv('air_store_info.csv')
air_reserve = pd.read_csv('air_reserve.csv')
hpg_store_info = pd.read_csv('hpg_store_info.csv')
hpg_reserve = pd.read_csv('hpg_reserve.csv')
date_info = | pd.read_csv('date_info.csv') | pandas.read_csv |
import re
import pandas as pd
from google.oauth2 import service_account
from langdetect import detect_langs
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer as SIA
import numpy as np
from numpy import mat, mean, sqrt, diag
import statsmodels.api as sm
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
def language_filter(df, series=str, language_select=str):
df = df.reset_index(drop=True)
# Purpose: Detect language of string using Google's langDetect
# Arguments: DF; DataFrame, (series = name of column in df as string), (language_select = two letter
# string of language code that you want)
df_copy = df.copy()
df_copy['language'] = df_copy[series].apply(detect_langs)
# new column ['contains_your_language'] returns 'True' if ['language'] contains any probability of your language
df_copy['contains_your_language'] = df_copy['language'].apply(str).str.contains(language_select)
# parse data to only return values where ['contains_your_language'] is True
df_copy = df_copy.loc[df_copy['contains_your_language'] == True]
# remove ['language'] and ['contains_your_language'] as they are no longer needed
del df_copy['language']
del df_copy['contains_your_language']
# reindex df
df_copy = df_copy.reset_index(drop=True)
# return your new filtered DataFrame
return df_copy
def get_sentiment(df, series=str):
# initialize sentiment classifier
sia = SIA()
# get sentiment
sentiment = df[series].apply(sia.polarity_scores)
# create sentiment df
sentiment = pd.DataFrame(sentiment.tolist())
# merge sentiment with your df
df = df.merge(sentiment, how='left', left_index=True, right_index=True)
df['sentiment'] = df['compound'].apply(categorize_sentiment)
df['sentiment'] = pd.Categorical(df['sentiment'])
binary_sentiment = df['sentiment'].str.get_dummies()
df = df.merge(binary_sentiment, how='left', left_index=True, right_index=True)
return df
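# Usage sketch (hypothetical data, not part of the original pipeline): given a
# DataFrame with a text column, get_sentiment appends the VADER scores plus the
# derived sentiment category and its dummy columns.
# comments = pd.DataFrame({'body': ['I love this coin', 'This is terrible', 'ok I guess']})
# scored = get_sentiment(comments, series='body')
# scored[['body', 'compound', 'sentiment']]  # compound score and its category per row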
def categorize_sentiment(x):
if x >= 0.05:
return 'positive_comment'
elif 0.05 > x > -0.05:
return 'neutral_comment'
elif -0.05 >= x:
return 'negative_comment'
def group_sentiment(dfSentiment):
dfSentiment['datetime'] = pd.to_datetime(dfSentiment['created_utc'], unit='s')
dfSentiment['date'] = pd.DatetimeIndex(dfSentiment['datetime']).date
dfSentiment = dfSentiment[
['created_utc', 'negative_comment', 'neutral_comment', 'positive_comment', 'datetime', 'date']]
dfSentiment = dfSentiment.groupby(by=['date']).sum()
return dfSentiment
def collect_big_query(sQuery):
credentials = service_account.Credentials.from_service_account_file(r'insert-file-path-for-json-creditial-file')
project_id = 'insert-project-id-here'
data = pd.read_gbq(sQuery, project_id=project_id, credentials=credentials, dialect='standard')
return data
# Function for the Fama-MacBeth two-pass regression used in the Fama-French framework
def FMB(returns, riskFactors):
# function fmbOut = famaMacBeth(returns,riskFactors)
# Purpose: Estimate linear asset pricing models using the Fama and MacBeth
# (1973) two-pass cross-sectional regression methodology.
#
# Input: returns = TxN maxtrix of portfolio excess returns
# riskFactors = TxK matrix of common risk factors
#
# Output: A struct including results from the two steps
# Use mat for easier linear algebra
factors = mat(riskFactors.values)
excessReturns = mat(returns.values)  # possibly without .values
# Shape information
t, n = excessReturns.shape
# Time series regressions
X = sm.add_constant(factors)  # build X by adding a column of ones to the factors
ts_res = sm.OLS(excessReturns, X).fit() # First pass regression
beta = ts_res.params[1:]
# Cross-section regression
cs_params = pd.DataFrame()
cs_X = sm.add_constant(beta.T)
for iObs in range(t):
cs_params = pd.concat([cs_params, pd.DataFrame(sm.OLS(excessReturns[iObs].T, cs_X).fit().params)], axis=1)
# Risk prices and Fama-MacBeth standard errors and t-stats
RiskPrices = cs_params.mean(axis=1).T
seGamma = sqrt((cs_params.T.sub(RiskPrices) ** 2).sum(axis=0) / t ** 2)
tGamma = RiskPrices / seGamma
# Mean and fitted excess returns
meanReturns = pd.DataFrame(mean(excessReturns, 0))
fittedValues = (pd.DataFrame(cs_X) @ pd.DataFrame(RiskPrices)).T
# Cross sectional R^2
Ones = pd.DataFrame(np.ones((1, n), dtype=int)).T
errResid = meanReturns - fittedValues
s2 = mean(errResid ** 2, axis=1)
vary = mean((meanReturns.T - Ones * mean(meanReturns, axis=1)) ** 2)
rSquared = 100 * (1 - s2 / vary)
fmbOut = dict()
fmbOut['beta'] = ts_res.params
fmbOut['gamma'] = RiskPrices
fmbOut['se'] = seGamma
fmbOut['tstat'] = tGamma
fmbOut['r2'] = rSquared
fmbOut['fit'] = fittedValues
fmbOut['mean'] = meanReturns
return fmbOut
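# Minimal sketch of calling FMB (the variable names below are illustrative, not from
# the original scripts): both inputs are DataFrames aligned on the same dates, with
# test portfolio excess returns in `portfolios` and factor returns in `factors`.
# fmb = FMB(returns=portfolios, riskFactors=factors)
# fmb['gamma']   # estimated risk prices from the second-pass regressions
# fmb['tstat']   # Fama-MacBeth t-statistics
# fmb['r2']      # cross-sectional R^2 in percent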
def PortfolioSort(dfReturns, dfMarketCap, dfSignal):
dfSignalSorted = pd.DataFrame()
dfReturns = dfReturns[1:]
dfMarketCap = dfMarketCap[1:]
dfSignal = dfSignal[:-1]
# Lag returns based on portfolio decision
dfReturns.index = dfSignal.index
dfMarketCap.index = dfSignal.index
# Calculate Number of current coins in market portfolio:
MarketCapDummy = dfMarketCap.iloc[:, :].ge(0.1, axis=0)
MarketCapDummy = MarketCapDummy.where(MarketCapDummy == 1, np.nan)
dfSignal = dfSignal.multiply(MarketCapDummy)
NumActiveCoins = dfSignal.iloc[:, :].ge(-1.1, axis=0).sum(axis=1)
# Rank top based on signal
df_rank = dfSignal.stack(dropna=False).groupby(level=0).rank(ascending=False, method='first').unstack()
dfSignal_Trank = df_rank.le(round(NumActiveCoins / 3), axis=0)
# Get top Market cap and returns
dfMarketCap_Top = dfMarketCap[dfSignal_Trank]
dfReturns_Top = dfReturns[dfSignal_Trank]
dfMarketCap_Top = dfMarketCap_Top.fillna(0)
dfReturns_Top = dfReturns_Top.fillna(0)
# Rank bottom based on signal
df_rank = dfSignal.stack(dropna=False).groupby(level=0).rank(ascending=True, method='first').unstack()
dfSignal_Brank = df_rank.le(round(NumActiveCoins / 3), axis=0)
# get bottom market cap and returns
dfMarketCap_Low = dfMarketCap[dfSignal_Brank]
dfReturns_Low = dfReturns[dfSignal_Brank]
dfMarketCap_Low = dfMarketCap_Low.fillna(0)
dfReturns_Low = dfReturns_Low.fillna(0)
dfReturns_Mid = dfReturns.sub(dfReturns_Top)
dfReturns_Mid = dfReturns_Mid.sub(dfReturns_Low)
dfMarketCap_Mid = dfMarketCap.sub(dfMarketCap_Top)
dfMarketCap_Mid = dfMarketCap_Mid.sub(dfMarketCap_Low)
dfReturns_Mid = dfReturns_Mid.fillna(0)
dfMarketCap_Mid = dfMarketCap_Mid.fillna(0)
dfSignalSorted['Low'] = dfReturns_Low.multiply(dfMarketCap_Low).sum(axis=1) / dfMarketCap_Low.sum(axis=1)
dfSignalSorted['Mid'] = dfReturns_Mid.multiply(dfMarketCap_Mid).sum(axis=1) / dfMarketCap_Mid.sum(axis=1)
dfSignalSorted['Top'] = dfReturns_Top.multiply(dfMarketCap_Top).sum(axis=1) / dfMarketCap_Top.sum(axis=1)
dfSignalSorted['LS'] = dfSignalSorted['Top'] - dfSignalSorted['Low']
return dfSignalSorted
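# Usage sketch (assumed inputs): dfReturns, dfMarketCap and dfSignal are TxN frames
# sharing the same date index and coin columns; the signal is effectively lagged one
# period inside the function before the tercile portfolios are formed.
# sorted_pf = PortfolioSort(dfReturns, dfMarketCap, dfSignal)
# sorted_pf[['Low', 'Mid', 'Top', 'LS']].mean()  # average value-weighted returns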
def FactorSort(dfReturns, dfMarketCap, dfSignal):
dfSignalSorted = pd.DataFrame()
dfReturns = dfReturns[1:]
dfMarketCap = dfMarketCap[1:]
dfSignal = dfSignal[:-1]
# Calculate Number of current coins in market portfolio:
MarketCapDummy = dfMarketCap.iloc[:, :].ge(0.1, axis=0)
MarketCapDummy = MarketCapDummy.where(MarketCapDummy == 1, np.nan)
dfSignal = dfSignal.multiply(MarketCapDummy)
NumActiveCoins = dfSignal.iloc[:, :].ge(-1.1, axis=0).sum(axis=1)
# Rank top based on signal
df_rank = dfSignal.stack(dropna=False).groupby(level=0).rank(ascending=False, method='first').unstack()
dfSignal_Trank = df_rank.le(round(NumActiveCoins * 0.3), axis=0)
# Get top Market cap and returns
dfMarketCap_Top = dfMarketCap[dfSignal_Trank]
dfReturns_Top = dfReturns[dfSignal_Trank]
dfMarketCap_Top = dfMarketCap_Top.fillna(0)
dfReturns_Top = dfReturns_Top.fillna(0)
# Rank bottom based on signal
df_rank = dfSignal.stack(dropna=False).groupby(level=0).rank(ascending=True, method='first').unstack()
dfSignal_Brank = df_rank.le(round(NumActiveCoins * 0.3), axis=0)
# get bottom market cap and returns
dfMarketCap_Low = dfMarketCap[dfSignal_Brank]
dfReturns_Low = dfReturns[dfSignal_Brank]
dfMarketCap_Low = dfMarketCap_Low.fillna(0)
dfReturns_Low = dfReturns_Low.fillna(0)
dfReturns_Mid = dfReturns.sub(dfReturns_Top)
dfReturns_Mid = dfReturns_Mid.sub(dfReturns_Low)
dfMarketCap_Mid = dfMarketCap.sub(dfMarketCap_Top)
dfMarketCap_Mid = dfMarketCap_Mid.sub(dfMarketCap_Low)
dfReturns_Mid = dfReturns_Mid.fillna(0)
dfMarketCap_Mid = dfMarketCap_Mid.fillna(0)
dfSignalSorted['Low'] = dfReturns_Low.multiply(dfMarketCap_Low).sum(axis=1) / dfMarketCap_Low.sum(axis=1)
dfSignalSorted['Mid'] = dfReturns_Mid.multiply(dfMarketCap_Mid).sum(axis=1) / dfMarketCap_Mid.sum(axis=1)
dfSignalSorted['Top'] = dfReturns_Top.multiply(dfMarketCap_Top).sum(axis=1) / dfMarketCap_Top.sum(axis=1)
dfSignalSorted['LS'] = dfSignalSorted['Top'] - dfSignalSorted['Low']
return dfSignalSorted
def ReturnSignificance(dfReturns):
# Returns: Tx5 matrix of Low, Mid1, Mid2, Top and LS returns of portfolio strategy
Ones = pd.DataFrame(np.ones((1, dfReturns.shape[0]), dtype=int)).T
Ones.index = dfReturns.index
Low_res = sm.OLS(dfReturns['P1'], Ones).fit()
Mid1_res = sm.OLS(dfReturns['P2'], Ones).fit()
Mid2_res = sm.OLS(dfReturns['P3'], Ones).fit()
Top_res = sm.OLS(dfReturns['P4'], Ones).fit()
LS_res = sm.OLS(dfReturns['LS'], Ones).fit()
Values = [[Low_res.params, Mid1_res.params, Mid2_res.params, Top_res.params, LS_res.params]]
Values.append([Low_res.bse, Mid1_res.bse, Mid2_res.bse, Top_res.bse, LS_res.bse])
Values.append([Low_res.tvalues, Mid1_res.tvalues, Mid2_res.tvalues, Top_res.tvalues, LS_res.tvalues])
Values.append([Low_res.pvalues, Mid1_res.pvalues, Mid2_res.pvalues, Top_res.pvalues, LS_res.pvalues])
df = pd.DataFrame(Values, columns=['P1', 'P2', 'P3', 'P4', 'LS'], index=['beta', 'se', 't-values', 'p-values'], dtype=np.float64)
print(LS_res.summary())
return df
def ReturnSignificance2(dfReturns):
# Returns: Tx5 matrix of Low, Mid1, Mid2, Top and LS returns of portfolio strategy
Ones = pd.DataFrame(np.ones((1, dfReturns.shape[0]), dtype=int)).T
Ones.index = dfReturns.index
Low_res = sm.OLS(dfReturns['P1'], Ones).fit()
Mid_res = sm.OLS(dfReturns['P2'], Ones).fit()
Top_res = sm.OLS(dfReturns['P3'], Ones).fit()
LS_res = sm.OLS(dfReturns['LS'], Ones).fit()
Values = [[Low_res.params, Mid_res.params, Top_res.params, LS_res.params]]
Values.append([Low_res.bse, Mid_res.bse, Top_res.bse, LS_res.bse])
Values.append([Low_res.tvalues, Mid_res.tvalues, Top_res.tvalues, LS_res.tvalues])
Values.append([Low_res.pvalues, Mid_res.pvalues, Top_res.pvalues, LS_res.pvalues])
df = pd.DataFrame(Values, columns=['P1', 'P2', 'P3', 'LS'], index=['beta', 'se', 't-values', 'p-values'], dtype=np.float64)
print(LS_res.summary())
return df
def FMB_Shank(returns, riskFactors, nLagsTS):
# function fmbOut = famaMacBeth(returns,riskFactors)
# Purpose: Estimate linear asset pricing models using the Fama and MacBeth
# (1973) two-pass cross-sectional regression methodology.
#
# Input: returns = TxN maxtrix of portfolio excess returns
# riskFactors = TxK matrix of common risk factors
# nLagsTS = Scalar indicating the number of lags to include in HAC
# estimator of variance in first-stage regression
#
# Output: Two structures including results from the two steps
# Use mat for easier linear algebra
factors = mat(riskFactors.values)
excessReturns = mat(returns.values)  # possibly without .values
# Shape information
t, n = excessReturns.shape
nFactors = factors.shape[1]
# Time series regressions
# X = sm.add_constant(factors)  # build X by adding a column of ones to the factors
# ts_res = sm.OLS(excessReturns, X).fit()  # first-pass regression (old version)
ts_res = nwRegress(excessReturns, factors, 1, nLagsTS)
beta = ts_res['bv'][1:]
# Cross-section regression
cs_params = pd.DataFrame()
cs_X = sm.add_constant(beta.T)
for iObs in range(t):
cs_params = pd.concat([cs_params, pd.DataFrame(sm.OLS(excessReturns[iObs].T, cs_X).fit().params)], axis=1)
# Risk prices and Fama-MacBeth standard errors and t-stats
RiskPrices = cs_params.mean(axis=1).T
covGamma = (cs_params.T.sub(RiskPrices).T @ cs_params.T.sub(RiskPrices)) / t ** 2
# seGamma = sqrt((cs_params.T.sub(RiskPrices)**2).sum(axis=0)/t**2)
seGamma = sqrt(diag(covGamma))
tGammaFM = RiskPrices / seGamma
# Adding the Shanken (1992) correction as per Goyal (2012) eq. (33)
covRiskFactors = ((factors - mean(factors, axis=0)).T @ (factors - mean(factors, axis=0))) / (t - nFactors)
c = RiskPrices[1:] @ np.linalg.inv(covRiskFactors) @ RiskPrices[1:].T # Excluding the constant
covShanken = 1 / t * ((1 + c) * (t * covGamma.iloc[1:, 1:]) + covRiskFactors)
seGammaShanken = sqrt(diag(covShanken)).T
seGammaShanken = np.insert(seGammaShanken, 0, seGamma[0])
tGammaShanken = RiskPrices / seGammaShanken
# Mean and fitted excess returns
meanReturns = pd.DataFrame(mean(excessReturns, 0))
fittedValues = ( | pd.DataFrame(cs_X) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 18:49:14 2018
@author: kennedy
"""
import pandas as pd
import numpy as np
def process_time(df):
if 'timestamp' not in df.columns:
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements the global import of all data
Created on Mon Dec 26 20:51:08 2016
@author: rwilson
"""
import numpy as np
import glob
import re
import os
import csv
from itertools import repeat
import pandas as pd
import h5py
from dateutil.parser import parse
import codecs
from scipy.io import loadmat
from scipy import signal
import shelve
import pickle
import numpy as np
import pandas as pd
class utilities:
'''Collection of functions intended for data related processes.
'''
def DB_group_names(Database, group_name = None):
'''Read in group names found within group. If group is not provided the
upper folder structure will be read from.
Parameters
----------
Database : str
Relative location of database
group_name : str
Name of the group whose sub-group names are listed; if None, the top level is read.
Returns
-------
group_names : list
Group names found within the group
notes
-----
Add some additional error checks
'''
with h5py.File(Database, 'r') as h5file:
if group_name is not None:
group = h5file.get(group_name)
group_names = [key for key in group.keys()]
else:
group_names = [key for key in h5file.keys()]
return group_names
def DB_attrs_save(Database, dictionary):
'''Save attribute to database head.
Parameters
----------
Database : str
Relative location of database
dictionary : dict
Dictionary of attributes
notes
-----
Add some additional error checks
'''
print('* The following %s attributes will be updated' % Database)
with h5py.File(Database, 'r+') as h5file:
for key,item in zip(dictionary.keys(), dictionary.values()):
print('Key:', key,'| item:', item)
h5file.attrs[key] = item
def DB_attrs_load(Database, attrs_names):
'''Read attribute from database head.
Parameters
----------
Database : str
Relative location of database
attrs_names : list(str)
The expected attribute name/s
Returns
-------
dict_attri : dict
The returned dictionary of attribute/s from the database
notes
-----
Add some additional error checks
'''
dict_attrs = {}
with h5py.File(Database, 'r') as h5file:
for attrs_name in attrs_names:
# Load the database
attrs = h5file.attrs[attrs_name]
dict_attrs[attrs_name] = attrs
return dict_attrs
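# Illustrative round trip for the two attribute helpers above (file name and keys
# are made up, and Database.h5 is assumed to exist already since it is opened 'r+'):
# utilities.DB_attrs_save('Database.h5', {'sample_rate': 1e6, 'operator': 'lab'})
# attrs = utilities.DB_attrs_load('Database.h5', ['sample_rate', 'operator'])
# attrs['sample_rate']  # -> 1000000.0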
def DB_pd_data_load(Database, group, cols = None, whereList = None):
'''Loads in a pandas dataframe stored in group from the Database.
Parameters
----------
Database : str
Relative location of database
group : str
The expected group name
cols : list(str) / list(int)
If not None, will limit the return columns, only applicable for ``table``
format database. For ``fixed`` format database only int accepted
whereList : list of Term (or convertable) objects or slice(from, to)
The conditional import of data, example ['index>11', 'index<20'],
only applicable for ``table`` format database. For ``fixed`` format
database only a slice object is applicable and will use the row index
numbers not the index values (i.e. df.iloc vs df.loc)
Returns
-------
group_df : DataFrame
The data stored in the requested ``group`` as a pandas dataframe
TSsurvey = pd.read_hdf(h5file, 'survey20180312093545',
columns=[(1,1), (1,2)], # Load specific columns
where = ['index>11', 'index<20']) # Load index 11 -> 20
'''
with pd.HDFStore(Database, 'r+') as h5file:
# Check that the expected group name is found in the database
# group_names = [key for key in h5file.keys()]
# expected_group_name = '/'+group
# if expected_group_name not in group_names:
# raise KeyError('The %s group was not found within the %s database.' \
# %(expected_group_name, Database))
# Load the database
try:
group_df = pd.read_hdf(h5file, group, columns = cols, where = whereList)
except TypeError:
with pd.HDFStore(Database, 'r+') as h5file:
group_df = pd.read_hdf(h5file, group)
group_df = group_df.iloc[whereList, cols]
return group_df
def DB_pd_data_save(Database, group, df):
'''Saves in a pandas dataframe stored in group from the Database.
Parameters
----------
Database : str
Relative location of database
group : str
The expected group name
df : DateFrame
Pandas DataFrame to be stored in h5 file ``Database``
'''
with pd.HDFStore(Database, 'r+') as h5file:
# Save the database
df.to_hdf(h5file, group)
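# Sketch of a save/load cycle with the two helpers above (group name is only an
# example and the .h5 file is assumed to exist, since the store is opened in 'r+'):
# df = pd.DataFrame({'a': [1, 2, 3]})
# utilities.DB_pd_data_save('Database.h5', 'PVdata', df)
# df_back = utilities.DB_pd_data_load('Database.h5', 'PVdata')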
def PV_TS_DB_merge(CC, PVdata, mergeOnIndex=False):
'''Merge/concatenate based on the time axis. The expected structures of
'PVdata' and 'CC' DataFrames is a ``Time Stamp`` axis and a ``Time`` index
level on which the concatenation takes place.
Parameters
----------
CC : DataFrame or list(DataFrame)
Expected to contain the processed data or a list of DataFrame with
processed data. The index must be a timestamp.
PVdata : DataFrame
A single DataFrame containing the corresponding perturbation information
and must contain a column ``Time Stamp``, which will be used during the
concatentation/merge.
mergeOnIndex : Default False
Merge based on the index values. Currently only works when a single ``CC``
DataFrame is provided, and not for a list of DF.
Returns
-------
PV_CC_df : DataFrame
Merged/concatenated dataframe of both PV and Coda processed data.
column_values : array
of tuples defining the multi-level indices of the CC data.
'''
# Load the individual dataframes.
# CC = utilities.DB_pd_data_load(Database, 'CCprocessed')
# PVdata = utilities.DB_pd_data_load(Database, 'PVdata')
# Convert the Time to datetime
# Dealing with single CC dataframe
if not isinstance(CC, list): CC= [CC]
CC_list = []
col_tup = []
for CC_df in CC:
CC_df.index.set_levels(pd.to_datetime(CC_df.index.levels[2]), level='Time',
inplace=True)
# Pivot the outermost row indices to columns
CC_test = CC_df.unstack(level=[0,1])
CC_test = CC_test.reorder_levels(
['lag', 'window', 'srcNo', 'recNo','Parameters'], axis=1)
CC_list.append(CC_test)
col_tup = col_tup + CC_test.columns.get_values().tolist()
PVdata.set_index('Time Stamp', inplace=True)
if mergeOnIndex:
PV_CC_df = pd.merge(PVdata, CC_list[0], how='inner', left_index=True,
right_index=True)
else:
PV_CC_df = pd.concat([PVdata] + CC_list, axis=1)
interpCols = PVdata.columns
PV_CC_df[interpCols] = PV_CC_df[interpCols].interpolate()
return PV_CC_df, col_tup
def DB_COL_stats(DF, colList, baseName, stats = ['mean', 'std'], norm=False):
'''Extract stats from multiple columns with a ``CommonKey``.
Parameters
----------
DF : DataFrame
Dataframe from which statistics will be generated.
colList : list
a list of columns from which the stats will be made.
baseName : str
The base name of the new columns to which '_[stats]' will be appended.
stats : list
A list of strings containing the requested stats to be generated for
columns with ``CommonKey``.
norm : bool (default = False)
Perform a min-max norm of the added statistic between 0 and 1.
Returns
-------
DF : DataFrame
Original dataframe plus columns containing requested stats.
'''
for stat in stats:
if isinstance(baseName, tuple):
baseName_stat = list(baseName)
baseName_stat[-1] = baseName_stat[-1]+'_'+stat
baseName_stat = tuple(baseName_stat)
else:
baseName_stat = baseName+'_'+stat
if stat == 'mean':
DF[baseName_stat] = DF.loc[:, colList].mean(axis=1)
elif stat == 'std':
DF[baseName_stat] = DF.loc[:, colList].std(axis=1)
if norm:
DF[baseName_stat] = (DF[baseName_stat] -
DF[baseName_stat].min()) / \
(DF[baseName_stat].max() -
DF[baseName_stat].min())
return DF
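# Example call (column names are placeholders): collapse several gauge channels
# into new 'strain_mean' / 'strain_std' columns of the same DataFrame.
# gauge_cols = ['strain_1', 'strain_2', 'strain_3']
# DF = utilities.DB_COL_stats(DF, gauge_cols, 'strain', stats=['mean', 'std'])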
def CC_lag(CC_df, period=None, units='us', relVel = True, strainCol=None):
'''Convert CC lag or First Break Picked data from number of sample points
to time or relative velocity change based on:
.. math::
\dfrac{\delta v}{v} = -\dfrac{\delta t}{t}
Expected column names should either contain 'lag' in the last of a tuple,
eg. col[-1] in the case of lag correction, or 'FBP' in the case of First
Break Picking correction. If a 'FBP' correction is required, then the
correct input initial velocity should be given.
Parameters
----------
CC_df : DataFrame
Dataframe holding the lag/FBP data to convert. A multi-level column
index is expected, with the parameter name (e.g. 'lag', 'FBP') in the last level.
period : float (default=None)
Seconds per sample,
units : str
Time unit of the samples, one of 's', 'ms', 'us' or 'ns'.
relVel : bool
Output the lag in terms of the relative velocity change.
strainCol : str (default=None)
The name of the strain column within ``CC_df`` which will be used
to correct the relative velocity calculation for a change in
separation distance.
Returns
-------
DF : DataFrame
Original dataframe with the lag columns modified as specified.
'''
unit_dict = {'us': 1E6, 'ms': 1E3, 'ns': 1E9, 's': 1}  # number of each unit per second
# Find all the lag/FBP columns
cols = [col for col in CC_df.columns.tolist() if 'lag' in col[-1] or
'FBP' in col]
if relVel and 'FBP' in cols:
if strainCol:
Vint = CC_df.loc[0, strainCol]/ CC_df.loc[0, cols[0]]
deltaV = CC_df.loc[:, strainCol]/ CC_df.loc[:, cols[0]] - Vint
else:
Vint = 1 / (CC_df.iloc[0, :] )
deltaV = 1 / (CC_df.loc[:, cols]) - Vint
CC_df.loc[:, cols] = deltaV/Vint
return CC_df
elif relVel:
# Middle of each window
t_prop = np.array([(int(wdw[1].split('-')[0]) +
int(wdw[1].split('-')[1]))/2
for wdw in cols]) * unit_dict[units] * period
CC_df.loc[:, cols] = (CC_df.loc[:, cols] * \
period * unit_dict[units] ) / t_prop*-1
return CC_df
t_prop = 1
CC_df.loc[:, cols] = CC_df.loc[:, cols] * \
period * unit_dict[units] / t_prop
return CC_df
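# Sketch of converting lag columns to relative velocity change; the sampling period
# of 0.1 us per sample is an assumed value, not taken from the original data.
# CC_df = utilities.CC_lag(CC_df, period=1e-7, units='us', relVel=True)
# The window label of each 'lag' column (e.g. '100-600') is used to estimate the
# propagation time at the centre of that window.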
def CC_integration(DF, dx='index', intType='trapz'):
'''Performs the integration of multiple columns witin a dataframe which is
expected to contain row index levels 'srcNo', 'recNo' and 'Time'. Each
srcNo and recNo pair over time will be integrated and added back into the
DF.
Parameters
----------
DF : DataFrame
Dataframe of the data to be integrated which must be row indexed by
``datetime64[ns]``.
dx : str (Default 'index')
If set to 'index' than integration is performed based on the time axis
intType : str (Default 'trapz')
The type of integration to use. trapz for trapezoid, cumsum for pure
cumulative summation.
Returns
-------
DF_int : DataFrame
The same dimensions and row indices as DF containing the cumtrapz
integration.
'''
from scipy import integrate
import itertools
# Define empty integration dataframe
col_list = [list(col) for col in DF.columns.tolist()]
col_list_int = [tuple(col_int[:-1] + [col_int[-1]+'-int'])
for col_int in col_list]
colIndex = pd.MultiIndex.from_tuples(col_list_int, names=DF.columns.names)
DF_int = pd.DataFrame(columns=colIndex, index=DF.index)
# Extract each unique src-rec pair in the DF
src_rec_pairs = [(src, rec) for src, rec in
itertools.product(
DF.index.get_level_values('srcNo').unique().values,
DF.index.get_level_values('recNo').unique().values)]
for src_rec in src_rec_pairs:
# Generate the index which will be integrated and convert to seconds
df = DF.loc[src_rec+(slice(None),)]
if dx=='index':
x = df.index.astype(np.int64)/10**9
else:
dx = 1
x = None
if intType=='trapz':
y = df.apply(pd.to_numeric).values
y_int = integrate.cumtrapz(y, x = x, dx=1, initial=0, axis=0)
# Add data to dataframe
DF_int.loc[src_rec+(slice(None),)] = y_int
elif intType=='cumsum':
y = df.apply(pd.to_numeric)
DF_int.loc[src_rec+(slice(None),)] = y.cumsum().values
return DF_int
def CC_to_K(DF):
'''Convert all Cross-correlation coefficients to decorrelation by applying
the simple transform:
.. math::
K = 1- CC
Parameters
----------
DF : DataFrame
Dataframe with multi level columns where the cross-correlation
coefficients are expected to be named 'CC'.
Returns
-------
DF : DataFrame
The dataframe is returned with only the data of the columns modified.
'''
CC_cols = [col for col in DF.columns if 'CC' in col]
DF.loc[:, CC_cols] = 1 - DF.loc[:, CC_cols]
return DF
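# Example: after this call every 'CC' column holds the decorrelation K = 1 - CC,
# so a perfectly correlated window maps to 0 and a fully decorrelated one to 1.
# CC_df = utilities.CC_to_K(CC_df)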
def Data_CSV_dump(DF, fileName, colNames = None, indices = None, CCtoK = False,
shiftCols=None, nthRow=None):
'''Dumps data from pandas dataframe to csv, intended for tikz plotting.
Note, the following char will be removed from the column names ,\'_\[\]%
and any row of the selected ``colNames`` containing atleast one NaN will
be filled with ``0``.
Parameters
----------
DF : DataFrame
DataFrame to be dumped
fileName : str
Name of the csv file saved to current working directory.
colNames : list
The columns to be dumped.
indices : slice
The slice object of indix values to be dumped.
CCtoK : bool (Default = False)
Convert all cross-correlation data to decorrelation. Expected column
names as tuples with the last entry equal to 'CC' or 'CC_mean'
shiftCols : list (Default = None)
List of columns to begin as zero (The first value will be subtracted
from all)
nthRow : int (Default = None)
If given, only every nth row of the selected data is written out.
'''
# Remove NaN
if colNames:
DF_out = DF.loc[:, colNames].fillna(0)
else:
DF_out = DF.fillna(0)
# Take every nth row
if nthRow:
DF_out = DF_out.iloc[::nthRow]
# shift to zero all columns in shiftCols
if shiftCols:
DF_out.loc[:, shiftCols] = DF_out.loc[:, shiftCols] - \
DF_out.loc[:, shiftCols].iloc[0]
# Convert all the 'CC' columns from correlation to decorrelation
if CCtoK:
col_tup = [col for col in DF_out.columns if isinstance(col, tuple)]
CCcols = [col for col in col_tup if col[-1]=='CC' or col[-1]=='CC_mean']
DF_out.loc[:, CCcols] = 1 - DF_out.loc[:, CCcols]
# Remove all non-compatiable columns from the database columns
renameCheck = {col: re.sub('[\'_\[\]%,]', '',str(col)).rstrip() for
col in DF_out.columns}
DF_out.rename(columns = renameCheck, inplace=True)
DF_out.to_csv(fileName, index_label = 'index')
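# Example dump for external plotting (file name and column selection are
# placeholders): write every 10th row and convert CC columns to decorrelation.
# utilities.Data_CSV_dump(PV_CC_df, 'forTikz.csv',
#                         colNames=['Pc [MPa]'] + cc_cols,  # cc_cols: list of CC column tuples
#                         nthRow=10, CCtoK=True)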
def Data_atGT(DF, targetCols, outputCols, points, pointsCol, shiftCols=None):
'''Extracts first datapoint greater than a defined column value
Parameters
----------
DF : DataFrame
DataFrame to be dumped
targetCols : list
list of all target columns in ``DF``
outputCols : list
list of output column names to use in output dataframe ``DF_out``. Must
be of equal length to targetCols.
points : list
list of points at which the first values > should be extracted from
each entry in ``targetCols``.
pointsCol : str
Name of the column in ``DF`` whose values are compared against ``points``.
shiftCols : list (Default = None)
List of columns to begin as zero (The first value will be subtracted
from all)
Returns
-------
df_trans : DataFrame
Output dataframe containing the requested points
'''
# Output values at the transition
# trans = [2.6, 3.3, 4, 6.1, 7.1, 8.8, 10, 11, 11.9, 12.9]
# Make the dataframe for storage
df_trans = pd.DataFrame(columns=outputCols)
# Remove NaN
DF_out = DF.copy().fillna(0)
# shift to zero all columns in shiftCols
if shiftCols:
DF_out.loc[:, shiftCols] = DF_out.loc[:, shiftCols] - \
DF_out.loc[:, shiftCols].iloc[0]
df_trans[pointsCol] = points  # store the comparison points as a column of the output
#df_trans.pointsCol = points
for idx, col in enumerate(points):
mask = DF_out[pointsCol]>col
temp_data = DF_out.loc[mask].iloc[0]
for outputCol, targetCol in zip(outputCols,targetCols):
if isinstance(targetCol,tuple):
df_trans.loc[idx, outputCol] = temp_data.loc[[targetCol]].astype(float).values[0]
else:
df_trans.loc[idx, outputCol] = temp_data.loc[targetCol]
return df_trans
def TS_Time(DF, secPerSamp, traceSlice, resampleStr = '1 us', csvDump = True,
wdwPos = None, fileName = 'traces.csv'):
'''Takes raw time series dataframe, allowing the slicing, resampling and
re-indexing. Output times are in seconds.
Parameters
----------
DF : DataFrame
DataFrame to be modified.
secPerSamp : float
The sampling period or seconds per sample required to generate a time
index for the dataframe.
traceSlice : slice
The slice object to extract from the DF.
resampleStr : str (default = '1 us')
The resample string to reduce the size of each trace.
csvDump : bool (default = True)
Save data to ``traces.csv`` in pwd in the order, time [sec], trace1, trace2,....
wdwPos : list
List of [start, stop] window positions in no. of smaples
fileName : str (Default = 'traces.csv')
Name of output csv file.
Returns
-------
DF : DataFrame
Output dataframe.
wdwTimes : list
List of lists of the window positions in seconds
'''
# Add time index
TdeltaIdx = pd.timedelta_range(start='0',
freq = '%.9f ms' % (secPerSamp*1000),
periods = DF.shape[0])
if wdwPos is not None:
wdwPos[:,1] = wdwPos[:,1]-1 # Correct indexing
wdwTimes = TdeltaIdx[wdwPos].astype(float)*1E-9
wdwTimes = [tuple(wdw*1000) for wdw in wdwTimes]
DF = DF.loc[:, (slice(None), slice(None),
DF.columns.get_level_values('Time')[traceSlice])]
DF['time'] = TdeltaIdx
DF.set_index('time', inplace=True)
DF = DF.resample(resampleStr).sum()
DF.reset_index(inplace=True)
DF['time'] = DF['time'].values.astype(float)*1E-9
if csvDump:
DF.to_csv(fileName, header = False, index=False)
return DF, wdwTimes
def hdf_csv_dump(DB_fdl):
''' Dumps the processed CC, PV and TShdrs databases to csv files. Note
this function should be run in the run folder, not the database folder
---inputs---
DB_fdl: relative or absolute location to the folder where all database
files are located
'''
def hdf_to_csv(hdf_DB, tbl_name):
''' Save hdf DB to csv
hdf_DB: HDF5 database rel of abs path and name
tbl_name: Name of table in database
'''
df = pd.read_hdf(hdf_DB, tbl_name)
df.to_csv(DB_fdl+tbl_name+'.csv')
# Expected HDF5 table names
CC_tbl_name = 'CC'
PV_tbl_name = 'PV_df'
PV_full_tbl_name = 'PV_df_full'
TShdrs_tbl_name = 'TShdrs'
TS_df_tbl_name = 'TS_df'
# Expected HDF5 db names
DB_tbl = DB_fdl+'DB_tbl_processed.h5'
TS_cut = DB_fdl+'TS_cut.h5'
# Load expected param file
output = open(DB_fdl+'param.txt', 'rb')
param = pickle.load(output)
# Dump all expected DB tables to csv files
hdf_to_csv(DB_tbl, PV_tbl_name)
if param['matched']:
hdf_to_csv(DB_tbl, PV_full_tbl_name)
hdf_to_csv(DB_tbl, TShdrs_tbl_name)
hdf_to_csv(DB_tbl, CC_tbl_name)
def run_dataLoad(DB_fdl):
''' Loads a previous processing session into memory ready for analysis.
- Inputs -
DB_fdl: input folder holding the expected databases in the form
'DB_fld/'
- Outputs -
PV_df: Main database holding PV, and CC data
TS_DB: Database of TS data
PV_df_full: Database including all PV data, empty if original PV and TS
data was coincident already.
'''
def from_pkl(fname):
''' Load pickel files
fname: file name rel or abs path
'''
try:
output = open(fname, 'rb')
obj_dict = pickle.load(output)
return obj_dict
except EOFError:
return False
def from_hdf5(DB_tbl, tbl_name):
'''Save expected df to hdf5 database
'''
df = pd.read_hdf(DB_tbl, tbl_name)
return df
# ------------------ Setup ------------------ #
# Load the param file data
param = from_pkl(DB_fdl+'param.txt')
# The database names
DB_tbl = pd.HDFStore(DB_fdl+'DB_tbl_processed.h5')
TS_cut = pd.HDFStore(DB_fdl+'TS_cut.h5')
# tabel names
PV_tbl_name = 'PV_df'
PV_full_tbl_name = 'PV_df_full'
TS_df_tbl_name = 'TS_df'
PV_df = from_hdf5(DB_tbl, PV_tbl_name)
if 'TSmatched' in param and param['TSmatched']:
PV_df_full = from_hdf5(DB_tbl, PV_full_tbl_name)
# TS_df = from_hdf5(TS_cut, TS_df_tbl_name)
TS_DB = from_hdf5(TS_cut, TS_df_tbl_name+'DB')
# TShdrs = from_hdf5(DB_tbl, TShdrs_tbl_name)
# CC = from_hdf5(DB_tbl, CC_tbl_name)
# Close the DB's
DB_tbl.close()
TS_cut.close()
return PV_df, TS_DB, PV_df_full, param
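# Typical reload of a finished processing run (the folder name 'DB_fld/' is only an
# example); the returned frames can then be inspected or plotted directly.
# PV_df, TS_DB, PV_df_full, param = run_dataLoad('DB_fld/')
# PV_df.head()   # perturbation vectors merged with the processed CC data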
class data_import:
"""This class handels the import and basic processing of all
user imput data. The core data streams are the time series information
(TS) and the corresponding Perturbation Vectors (PV)
Parameters
----------
TSfpath : str
Defines the relative or absolute location of the TS data folder
TSlocL : list (default = None)
List of the relative or absolute location of the TS data files
PVloc : str
Defines the relative or absolute location of the PV's
Database : str
Defines a common hdf5 database name ``Database.h5``
import_dtype: str
Defines the raw data type: "bin_par" for one binary trace per file with
header data in .par files, "Shell_format" for all data (both PV and TS) in
a single csv file, "NoTShdrer_format", "CSIRO" or "HDF5".
notes
-----
The output of this class should be a single ``Database.h5`` database in the
run directory, containing all relevant data. All user defined parameters are
assigned to the attribues of the database head.
Examples
--------
>>> import h5py
>>> # Reading the user defined parameters from the database attributes
>>> with h5py.File('Database.h5', 'r') as h5file:
>>> print(dict(h5file.attrs.items()))
"""
def __init__(self, TSfpath, PVloc, import_dtype, param = None):
self.TSfpath = TSfpath
self.TSlocL = None
self.PVloc = PVloc
self.Database = 'Database.h5'
self.import_dtype = import_dtype
self.param = param
# -------------------- Reading files in folders --------------------
def TSfiles(self):
'''This function lists the files in the TSfpath folder location reading
all of the information contained within. The structure of this
data is checked and the appropriate sub-function initiated based on the
user defined parameter 'import_dtype'
Parameters
----------
headerDB : DataFrame
Database of header information
TSdataList : list
List of TS data file relative locations
Returns
-------
headerDB : DataFrame or dict(DataFrames)
DataFrame of all header file information or dict of DataFrames. The
structure should be
index | srcNo | recNo | Time | "other header info"
'''
# Assign list of TS data files to self.TSlocL
self.TSlocL = self.read_finfd(self.TSfpath)
if self.import_dtype == 'CSIRO':
headerDB = self.TSfilesCSIRO()
print('* CSIRO survey TS data loaded')
return headerDB
elif self.import_dtype == 'bin_par':
headerDB = self.TSfilesPar()
print('* TUDelft .Par and Binary TS data loaded')
return headerDB
elif self.import_dtype == 'Shell_format':
headerDB = self.TSfilesPV_TS()
print('* Shell format TS data loaded')
return headerDB
elif self.import_dtype == 'HDF5':
from shutil import copyfile
headerDB = None
print('* HDF5 format expected ')
try:
copyfile(self.TSfpath, self.Database)
except FileNotFoundError:
return None
return headerDB
def read_finfd(self, file_loc):
'''Basic function to read files in folder and sort by numeric end digits
Parameters
----------
file_loc: str
Path to files.
'''
#print('Location of the time series data file/s is:', file_loc)
files = glob.glob(file_loc + '*')
files.sort(key=lambda var: [int(x) if
x.isdigit() else x
for x in
re.findall(r'[^0-9]|[0-9]+', var)])
return files
def TSfilesPar(self):
'''This function is intended to perform the TSfile function operations
for a folder containing .par header files and associated binary files.
Parameters
----------
List of all header .par and Binary files
Returns
-------
hdr_df Database of all header file information, multiple
index file name. Must contain mandatory columns
``['srcNo', 'recNo', 'Time', 'Survey']``.
TSdataL List of data files including relative location.
TODO:
* Add check for various header file types .txt ..etc
'''
# Split .par and data files
TSpar = [elem for elem in self.TSlocL if ".par" in elem]
self.TSlocL = [elem for elem in self.TSlocL if ".par" not in elem]
regex = re.compile(r'\d+')
# Raise error if .par no != data file no
fileNo_par = [regex.findall(file)[-1] for file in TSpar]
fileNo_data = [regex.findall(file)[-1] for file in self.TSlocL]
for par, data in zip(fileNo_par, fileNo_data):
if par != data:
print(par, data)
raise Warning(par, '.par not equal to data', data)
TSdata_modTime = [os.path.getmtime(elem) for elem in self.TSlocL]
# Load header files into database
fileNokeys = [x.rsplit('/', 1)[-1] for x in TSpar]
header = ['recNo', 'numSamp', 'sentv', 'sampFeq',
'n/a', 'offset', 'n/a', 'count[sec]']
df_list = [pd.read_csv(file, sep='\t', names=header) for file in TSpar]
hdr_df = pd.concat(df_list, keys=fileNokeys)
maxNorec = np.arange(hdr_df.recNo.min(),hdr_df.recNo.max()+1)
hdr_df['Survey'] = [int(val) for val in fileNo_data for _ in
maxNorec]
hdr_df.recNo = hdr_df.recNo.astype('int')
hdr_df['srcNo'] = int(1)
hdr_df['Time'] = pd.to_datetime(hdr_df['count[sec]'], unit='s',
origin = self.param['TSstart_date'], dayfirst=True)
# Re-order list based on expected order of columns
header.insert(0, 'srcNo')
header.insert(2, 'Time')
header.insert(3, 'Survey')
hdr_df = hdr_df[header]
# # duplicate time create based on the noRec
# noRec = len(hdr_df.recNo.unique()) # No of receivers
# temp = [x for item in TSdata_modTime
# for x in repeat(item,noRec)]
# hdr_df['fileMod'] = temp
return hdr_df
def TSfilesPV_TS(self):
'''Loads in impulse response header data from a single folder.
The expected format is the shell data structure.
Parameters
----------
self.TSlocL : list
List of all files within given folder
Returns
-------
df_hdr : DataFrame
Database of all header file information for each file. Must contain
mandatory columns ``['srcNo', 'recNo', 'Time', 'Survey']``.
'''
def getrows(filename, rowNos):
'''Outputs a list with the contents of each row number in rowNos
Parameters
----------
filename : str
relative or absolute path to file
rowNos : list
list of int of increasing order defining the row numbers to read.
'''
with open(filename, 'r', encoding='ISO-8859-1') as f:
datareader = csv.reader(f)
count = 0
rows = []
for row in datareader:
if count in rowNos:
rows.append(row)
count +=1
if count > rowNos[-1]:
return rows
# Expected format
rowNos = [4,6,7,8,9,10]
columns = ['srcNo','recNo','Time', 'Survey', 'Averages', 'Exitation Freq',
'Vertical gain', 'Delay','Vertical Offset','Sample interval']
df_hdr = pd.DataFrame(columns=columns)
for idx, file in enumerate(self.TSlocL):
# Read in the file header rows
file_hdr = getrows(file, rowNos)
# Formate the rows
items = [1, # srcNo
1, # recNo
file_hdr[0][0], # Time
int(idx), # Survey
int(file_hdr[1][1]), # Averages
float(file_hdr[1][3]), # Exitation Freq
float(file_hdr[2][1]), # Vertical gain
float(file_hdr[3][1]), # Delay
float(file_hdr[4][1]), # Vertical Offset
float(file_hdr[5][1])] # Sample interval
# Store within dataframe
df_hdr.loc[idx] = items
return df_hdr
def TSfilesCSIRO(self):
'''Loads in impulse response header data from multiple subfolders throughout
a survey. Each subfolder should be of the format "sometext"YYYYMMDDHHMMSS.
Within each subfolder are files for each source receiver pair.
Parameters
----------
self.TSlocL : list
List of all files within given
Returns
-------
header_dict : dict
Database of all header file information, multiple
index file name
TdeltaIdx : timedelta64[ns]
Time delta index of length equal to trace length of TS data
self.TSlocL : list of lists
Updated list of lists of each survey folders contents.
'''
# list for all surveys
TSlocL_temp = []
# Define the columns of the dataframe
columns = ['srcNo','recNo','Time','TracePoints','TSamp','TimeUnits',
'AmpToVolts','TraceMaxVolts','PTime','STime']
# Create the dataframe for all headers to be stored
header_dict = {}
for survey in self.TSlocL:
survey_files = [ file for file in self.read_finfd(survey + '/') if '.atf' in file]
TSlocL_temp.append(survey_files)
df1 = pd.DataFrame(columns=columns)
count = 0
for file in survey_files:
with open(file, newline='') as f:
reader = csv.reader(f)
next(reader)
header = next(reader)
f.close()
header = re.split(r"[\;]+", header[0]) # split the header up
header_split = [re.split(r'[\=]', head) for head in header][:-1] # split the header up
header_split = [ [head[0].split()[0], head[1]] for head in header_split] # Remove white space
# Extract the values
items = [ item[1] for item in header_split]
# Combine date and time:
items[1] = items[0]+' '+items[1]
items = items[1:]
# Convert datatypes
for idx,item in enumerate(items):
try:
items[idx] = float(item)
except ValueError:
items[idx] = pd.to_datetime(item, dayfirst=True) # parse(item)
# Add srcNo and recNo to header info.
srcNo = re.split(r'[_.]',file)[-3]
recNo = re.split(r'[_.]',file)[-2]
items.insert(0, int(recNo))
items.insert(0, int(srcNo))
df1.loc[count] = items
count += 1
header_dict[re.split(r'[/]',survey)[-1]] = df1
# Create time delta array
if df1['TimeUnits'][0] == 1.00000e-006:
freq = 'us'
else:
freq = 'ms'
TdeltaIdx = pd.timedelta_range(start = '0',
freq = freq,
periods = df1['TracePoints'][0])
# Redefine the list of surveys to a list of lists of survey files
self.TSlocL = TSlocL_temp
return header_dict
# -------------------- Loading TS data into memory --------------------
def TSload(self, TShdrs):
''' Load the list of TS files ``TSflist`` found and output a
matrix storing TS data column-wise.
Parameters
----------
TShdrs: DataFrame
A database of each file name containing the corresponding header. This
is output from the function ``TSfiles``.
TSflist: list
A list of the files within the ``TSfpath`` folder location. This
is output from the function ``TSfiles``.
Returns
-------
TSdataMtx : numpy array or ``None``
A column-wise stored matrix of traces for a single receiver, or ``None``
if a multiple receiver survey is detected. In this case all TS data
will be saved into a hdf5 database 'TSdata.h5'.
Note
----
All imported TS data will have any linear trend removed before storage
to the raw database.
'''
if self.import_dtype == 'CSIRO':
print('* TS data written to database.h5')
TSdataMtx = self.TSsurveys(TShdrs)
return TSdataMtx
elif self.import_dtype == 'bin_par':
TSdataMtx = self.TSloadBin(TShdrs)
print('* Binary data TS files loaded into matrix')
return TSdataMtx
elif self.import_dtype == 'Shell_format':
TSdataMtx = self.TSloadPV_TS(TShdrs)
print('* Shell format TS csv files loaded')
return TSdataMtx
elif self.import_dtype == 'HDF5':
print('* No TS data found')
return None
def TSsurveys(self, hdr_df):
'''Load TS data from a folder with sub-folders for each survey. The sub-
folders should be named 'sometext'(unique_number)'
(e.g. 'survey20180312093545'). The individual csv files must be
named 'sometext'(unique_number)_(sourceNo)_(receiverNo).(csv type format)
(e.g. 'survey20180312093545_0001_01.csv'). A hdf5 database will be saved
with the following group structure.
Database.h5
│
TSdata
│
└───survey20180312093545
│ Table
└───survey20180312093555
│ Table
: :
Each table includes header data from the csv files imported from the second
line of the csv files as Date=12-03-2018; Time=09:35:45.833000;
TracePoints=4096; TSamp=0.10000; TimeUnits=1.00000e-006;
AmpToVolts=1.0000; TraceMaxVolts=5.0000; PTime=0.00000; STime=0.00000;
Parameters
----------
hdf_df : dict of DataFrames
A database of header information for each Time-series recorded. As a
minimum must contain the first three columns of 'srcNo', 'recNo' and
'time'. The remaining header info can be any order.
self.TSlocL : list of lists
List of lists of the files within each survey
Returns
-------
_ : None
None indicating that data is stored in a HDF5 file.
notes
-----
No longer is the TdeltaIdx added to each dataframe, thus I should figure
out how best to store this information into the h5 file.
Examples
--------
>>> import pandas as pd
>>> import h5py
>>> with h5py.File('Database.h5', 'a') as h5file:
>>> TSsurvey = pd.read_hdf(h5file, 'survey20180312093545') # basic load
>>> # Load specific source-receiver pair for window between 11 and 500
>>> TSsurvey = pd.read_hdf(h5file, 'survey20180312093545',
columns=[(1,1), (1,2)], # Load specific columns
where = ['index>11', 'index<20']) # Load index 11 -> 20
'''
# Folder setup structure
file = self.Database
group = 'TSdata/'
# Read header keys from the first file
header_attributes = list(hdr_df[list(hdr_df.keys())[0]].keys())
# Open link to hdf5 database
with pd.HDFStore(file,'w') as h5file: # open the hdf data store link
for survey in self.TSlocL:
TS_data_list = []
survey_no = re.split(r'[/._]',survey[0])[-4]
# For each file in survey
for file in survey:
# read source receiver numbers from file name
srcNo = int(re.split(r'[_.]',file)[-3])
recNo = int(re.split(r'[_.]',file)[-2])
#----- Read from dictionary the surveys headers -----#
head_df = hdr_df[survey_no]
head_df = head_df.loc[(head_df['srcNo']==srcNo) & (head_df['recNo']==recNo)]
#----- Load in the trace -----#
temp1 = [head_df[col].astype(int).values for col in header_attributes[0:2]]
temp2 = [head_df[col].astype(int).values for col in [header_attributes[2]]]
temp3 = [head_df[col].astype(float).values for col in header_attributes[3:]]
header_vals = temp1 + temp2 + temp3
TSsurvey = pd.read_csv(file,skiprows=3, names=['temp'])
# Place header into the multiindex
TSsurvey.columns = pd.MultiIndex.from_product(header_vals,
names = header_attributes)
TS_data_list.append(TSsurvey)
#----- Join all into the full survey -----#
TSsurvey = pd.concat(TS_data_list, axis=1, join_axes=[TSsurvey.index])
# Set index to timedelta and drop the default
#TSsurvey.set_index(TdeltaIdx, inplace=True, drop=True)
#TSsurvey['Time Stamp'] = TdeltaIdx
# store into the database under name of survey and header data
h5file.put(group + survey_no, TSsurvey, format='table')
return None
def TSloadBin(self, TShdrs):
'''Load in the binary file format as expected from the
'import_dtype'= 'bin_par'.
'''
# Load TS data into matrix
TS_len = int(len(TShdrs.recNo.unique())*TShdrs.numSamp.max())
hdr_col = TShdrs.columns[:7].tolist()
#recNo_list = TShdrs.recNo.unique().tolist()
df_list = []
for file in self.TSlocL:
with open(file, 'r') as fid:
trace = np.fromfile(fid, np.int16)[-TS_len:] #* v_sens
# For each header entry
for hdr in TShdrs.loc[(file.split('/')[1]+'.par',
slice(None)), :].index.values:
# Create dataframe for storage with header info:
values = [ [TShdrs.loc[hdr, col]] for col in hdr_col]
cols = pd.MultiIndex.from_product(values, names = hdr_col)
recSlice = slice(int(hdr[1] * TShdrs.loc[hdr, 'numSamp']),
int((hdr[1]+1) * TShdrs.loc[hdr, 'numSamp']))
df = pd.DataFrame(trace[recSlice], columns = cols) * TShdrs.loc[hdr,'sentv']/3200 # Vsensitivty correction
df_list.append(df)
TSdata = pd.concat(df_list, axis=1, join_axes=[df.index])
with pd.HDFStore(self.Database, 'w') as h5file:
TSdata.to_hdf(h5file, 'TSdata')
return None
def TSloadPV_TS(self, TShdrs):
'''Read in time-series data and save to database
'''
TS_data_list = []
for idx, survey in enumerate(self.TSlocL):
hdr_values = [[item] for item in TShdrs.loc[idx].values]
TSsurvey = pd.read_csv(survey, skiprows = 11, names = ['temp'])
TSsurvey.columns = pd.MultiIndex. \
from_product(hdr_values, names = list(TShdrs.columns.values))
TS_data_list.append(TSsurvey)
TSsurvey = pd.concat(TS_data_list, axis=1, join_axes=[TSsurvey.index])
TSsurvey = TSsurvey * TShdrs['Vertical gain'].values # Correct for gain
# TSsurvey = TSsurvey - TShdrs['Delay'].values TODO: confirm the correction for the delay time !!!!
TSsurvey = TSsurvey.apply(signal.detrend, axis=0) # Remove linear trend from traces
with pd.HDFStore(self.Database,'w') as h5file:
h5file.put('TSdata', TSsurvey, format='fixed')
return None
# -------------------- Loading PV data into memory --------------------
def PVload(self):
'''Load Perturbation Vectors into a single database and store in
'Database.h5' within group 'PVdata'.
Database.h5
│
PVdata
Table
Parameters
----------
self.PVloc : str
Path to PV data file/files
self.import_dtype : str
Indication of the data type, either ``['.xls', 'bin_par', 'CSIRO']``
or the 'Shell_format'.
'''
print('* Location of the perturbation data file/s is:', self.PVloc)
if '.xls' in self.PVloc:
df = pd.read_excel(self.PVloc)
elif self.import_dtype == 'bin_par':
df = | pd.read_csv(self.PVloc, sep=';', header=[1, 2]) | pandas.read_csv |
import pandas as pd
import xarray as xr
import re
import numpy as np
import datetime as dt
class AWS:
'''This class represents an Automatic Weather Station and its time series'''
def __init__(self, name, code, lat, lon, elev):
self.name = name
self.code = code
self.lat = lat
self.lon = lon
self.elev = elev
self.atmvar = dict()
def add_atmvar(self, name, time_series_dataarray):
self.atmvar[name] = time_series_dataarray
class AWSWriter:
'''This class is responsible for saving a group of AWS as a .csv file'''
pass
class AWSWiscReader:
'''This class reads an AWS from a .txt file from wisc data'''
def read_aws(self, filepath):
aws = self.read_metadata(filepath)
da = self.read_time_series(filepath)
aws.add_atmvar('T2m', da)
return aws
def read_metadata(self, filepath):
with open(filepath) as f:
firstline = f.readline().rstrip()
first_match_obj = re.match( r'Year: (.*) Month: (.*) ID: (.*) ARGOS: (.*) Name: (.*)', firstline)
secondline = f.readline().rstrip()
second_match_obj = re.match( r'Lat: (.*) Lon: (.*) Elev: (.*)', secondline)
return AWS( first_match_obj.group(5).strip(),
first_match_obj.group(3).strip(),
second_match_obj.group(1).strip(),
second_match_obj.group(2).strip(),
second_match_obj.group(3).strip(),
)
def read_time_series(self, filepath):
df = pd.read_csv(filepath, skiprows=2, header=None, sep='\s+', na_values=444.0)
temp = df[5]
time = pd.date_range("2021-12-01", "2021-12-31 23:50:00", freq="10min")
da = xr.DataArray(temp, coords=[time], dims=['time'])
return da
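# Usage sketch (the file path is hypothetical): read one Wisconsin AWS text file and
# inspect the 10-minute 2 m temperature series attached to the returned AWS object.
# reader = AWSWiscReader()
# aws = reader.read_aws('data/wisc/aws_dec2021.txt')
# aws.name, aws.atmvar['T2m'].mean()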
class AWSHalleyReader:
'''This class reads an AWS from a .txt file from halley or rothera station'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['Temp_ext_Avg', 'Temp_hmp_Avg', 'Temp_box_Avg']
for varname in varnames:
da_min = self.read_time_series(filepath, varname)
da = self.resample_time_series(da_min)
aws.add_atmvar(varname, da)
return aws
def read_time_series(self, filepath, varname):
df = pd.read_csv(filepath, sep=',')
time = pd.to_datetime(df['TIMESTAMP'])
data = df[varname]
da_min = xr.DataArray(data, coords=[time], dims='time')
return da_min
def resample_time_series(self, da_min):
da = da_min.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSArgusReader:
'''This class reads an AWS from a .txt file from ARGUS Australian station'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['AIR_TEMPERATURE_1M', 'AIR_TEMPERATURE_2M', 'AIR_TEMPERATURE_4M']
for varname in varnames:
da_min = self.read_time_series(filepath, varname)
da = self.resample_time_series(da_min)
aws.add_atmvar(varname, da)
return aws
def read_time_series(self, filepath, varname):
df = pd.read_csv(filepath, sep=',')
time = pd.to_datetime(df['OBSERVATION_DATE'])
data = df[varname].replace(0, np.nan)
da_min = xr.DataArray(data, coords=[time], dims=['time']).sortby('time')
return da_min
def resample_time_series(self, da_min):
da = da_min.resample(time='10min', closed='right', label='right', skipna=True).mean()[1:-1]
return da
class AWSNOAAReader:
'''This class reads an AWS from a .txt file from NOAA data'''
def read_aws(self, filepath):
aws = AWS(None, None, None, None, None)
varnames = ['TEMPERATURE at 2 Meters', 'TEMPERATURE at 10 Meters', 'TEMPERATURE at Tower Top']
varcolumns = [10, 11, 12]
for (n, c) in zip(varnames, varcolumns):
da_min = self.read_time_series(filepath, c)
da = self.resample_time_series(da_min)
aws.add_atmvar(n, da)
return aws
def read_time_series(self, filepath, varcolumn):
df = pd.read_csv(filepath, header=None, sep='\s+', parse_dates={'datetime':[1,2,3,4,5]})
df['datetime'] = | pd.to_datetime(df['datetime'], format='%Y %m %d %H %M') | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
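# Concatenate the selected date/time columns row-wise and parse the combined strings as datetimes.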
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
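# Interpret the value as POSIX epoch seconds and convert it to a UTC datetime.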
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
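# Build row i so that NA value v lands in field i, padding the remaining fields with empty strings.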
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
| tm.assert_frame_equal(chunks[2], df[4:]) | pandas.util.testing.assert_frame_equal |
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
isna,
)
import pandas._testing as tm
class TestDataFrameCov:
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame["A"][:5] = np.nan
frame["B"][5:10] = np.nan
result = frame.cov(min_periods=len(frame) - 8)
expected = frame.cov()
expected.loc["A", "B"] = np.nan
expected.loc["B", "A"] = np.nan
tm.assert_frame_equal(result, expected)
# regular
result = frame.cov()
expected = frame["A"].cov(frame["C"])
tm.assert_almost_equal(result["A"]["C"], expected)
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(
np.cov(df.values.T).reshape((1, 1)), index=df.columns, columns=df.columns
)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
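# The NaN row should be excluded, so cov must match np.cov over the remaining valid rows.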
result = df.cov()
expected = DataFrame(
np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns,
columns=df.columns,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("test_ddof", [None, 0, 1, 2, 3])
def test_cov_ddof(self, test_ddof):
# GH#34611
np_array1 = np.random.rand(10)
np_array2 = np.random.rand(10)
df = DataFrame({0: np_array1, 1: np_array2})
result = df.cov(ddof=test_ddof)
expected_np = np.cov(np_array1, np_array2, ddof=test_ddof)
expected = DataFrame(expected_np)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"other_column", [pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])]
)
def test_cov_nullable_integer(self, other_column):
# https://github.com/pandas-dev/pandas/issues/33803
data = DataFrame({"a": pd.array([1, 2, None]), "b": other_column})
result = data.cov()
arr = np.array([[0.5, 0.5], [0.5, 1.0]])
expected = DataFrame(arr, columns=["a", "b"], index=["a", "b"])
tm.assert_frame_equal(result, expected)
class TestDataFrameCorr:
# DataFrame.corr(), as opposed to DataFrame.corrwith
@pytest.mark.parametrize("method", ["pearson", "kendall", "spearman"])
@td.skip_if_no_scipy
def test_corr_scipy_method(self, float_frame, method):
float_frame["A"][:5] = np.nan
float_frame["B"][5:10] = np.nan
float_frame["A"][:10] = float_frame["A"][10:20]
correls = float_frame.corr(method=method)
expected = float_frame["A"].corr(float_frame["C"], method=method)
tm.assert_almost_equal(correls["A"]["C"], expected)
# ---------------------------------------------------------------------
def test_corr_non_numeric(self, float_string_frame):
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ["A", "B", "C", "D"]].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame(
{
"A": [1, 1.5, 1, np.nan, np.nan, np.nan],
"B": [np.nan, np.nan, np.nan, 1, 1.5, 1],
"C": [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
}
)
rs = df.corr(meth)
assert isna(rs.loc["A", "B"])
assert isna(rs.loc["B", "A"])
assert rs.loc["A", "A"] == 1
assert rs.loc["B", "B"] == 1
assert isna(rs.loc["C", "C"])
@pytest.mark.parametrize("meth", ["pearson", "spearman"])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame(
{
"A": [1, 1, 1, np.nan, np.nan, np.nan],
"B": [np.nan, np.nan, np.nan, 1, 1, 1],
}
)
rs = df.corr(meth)
assert isna(rs.values).all()
@td.skip_if_no_scipy
@pytest.mark.parametrize("meth", ["pearson", "kendall", "spearman"])
def test_corr_int_and_boolean(self, meth):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it needs to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=["a", "b"], columns=["a", "b"])
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("method", ["cov", "corr"])
def test_corr_cov_independent_index_column(self, method):
# GH#14617
df = DataFrame(np.random.randn(4 * 10).reshape(10, 4), columns=list("abcd"))
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH#22298
df = DataFrame(np.random.normal(size=(10, 2)))
msg = "method must be either 'pearson', 'spearman', 'kendall', or a callable, "
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_corr_int(self):
# dtypes other than float64 GH#1761
df = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df.cov()
df.corr()
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"nullable_column", [pd.array([1, 2, 3]), pd.array([1, 2, None])]
)
@pytest.mark.parametrize(
"other_column",
[pd.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]), np.array([1.0, 2.0, np.nan])],
)
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
def test_corr_nullable_integer(self, nullable_column, other_column, method):
# https://github.com/pandas-dev/pandas/issues/33803
data = DataFrame({"a": nullable_column, "b": other_column})
result = data.corr(method=method)
expected = DataFrame(np.ones((2, 2)), columns=["a", "b"], index=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_corr_item_cache(self):
# Check that corr does not lead to incorrect entries in item_cache
df = DataFrame({"A": range(10)})
df["B"] = range(10)[::-1]
ser = df["A"] # populate item_cache
assert len(df._mgr.arrays) == 2 # i.e. 2 blocks
_ = df.corr()
# Check that the corr didn't break link between ser and df
ser.values[0] = 99
assert df.loc[0, "A"] == 99
assert df["A"] is ser
assert df.values[0, 0] == 99
@pytest.mark.parametrize("length", [2, 20, 200, 2000])
def test_corr_for_constant_columns(self, length):
# GH: 37448
df = DataFrame(length * [[0.4, 0.1]], columns=["A", "B"])
result = df.corr()
expected = DataFrame(
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"]
)
tm.assert_frame_equal(result, expected)
def test_calc_corr_small_numbers(self):
# GH: 37452
df = DataFrame(
{"A": [1.0e-20, 2.0e-20, 3.0e-20], "B": [1.0e-20, 2.0e-20, 3.0e-20]}
)
result = df.corr()
expected = DataFrame({"A": [1.0, 1.0], "B": [1.0, 1.0]}, index=["A", "B"])
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize("method", ["pearson", "spearman", "kendall"])
def test_corr_min_periods_greater_than_length(self, method):
df = DataFrame({"A": [1, 2], "B": [1, 2]})
result = df.corr(method=method, min_periods=3)
expected = DataFrame(
{"A": [np.nan, np.nan], "B": [np.nan, np.nan]}, index=["A", "B"]
)
tm.assert_frame_equal(result, expected)
class TestDataFrameCorrWith:
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b["B"]
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr["A"], a["A"].corr(b["A"]))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped["A"], a["A"].corr(b["A"]))
assert "B" not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ["a", "b", "c", "d", "e"]
columns = ["one", "two", "three", "four"]
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row], df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ["A", "B", "C", "D"]
df1["obj"] = "foo"
df2["obj"] = "bar"
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame["A"])
expected = datetime_frame.apply(datetime_frame["A"].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=["a"])
df2 = DataFrame(np.arange(10000) ** 2, columns=["a"])
c1 = df1.corrwith(df2)["a"]
c2 = np.corrcoef(df1["a"], df2["a"])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH#18570
df = DataFrame(
{"a": [1, 4, 3, 2], "b": [4, 6, 7, 3], "c": ["a", "b", "c", "d"]}
)
s = Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df["a"].corr(s), df["b"].corr(s)]
expected = Series(data=corrs, index=["a", "b"])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
df2 = DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = DataFrame(np.random.random(size=(10, 2)), columns=["a", "b"])
df2 = DataFrame(np.random.random(size=(10, 3)), columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH#21925
df1 = DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
def test_corr_numerical_instabilities(self):
# GH#45640
df = DataFrame([[0.2, 0.4], [0.4, 0.2]])
result = df.corr()
expected = | DataFrame({0: [1.0, -1.0], 1: [-1.0, 1.0]}) | pandas.DataFrame |
import abc
from unittest.mock import Mock
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.base import BaseTransformer
class TestBaseTransformer:
def test_get_subclasses(self):
"""Test the ``get_subclasses`` method.
Validate that any subclass of the ``BaseTransformer`` is returned by the
``get_subclasses`` method except if it also inherits from the ``ABC`` class.
Setup:
- create a ``Parent`` class which inherits from ``BaseTransformer`` and ``ABC``.
- create a ``Child`` class which inherits from ``Parent``.
Output:
- a list of classes including the ``Child`` class, but NOT including the ``Parent``.
"""
# Setup
class Parent(BaseTransformer, abc.ABC):
pass
class Child(Parent):
pass
# Run
subclasses = BaseTransformer.get_subclasses()
# Assert
assert Child in subclasses
assert Parent not in subclasses
def test_get_input_type(self):
"""Test the ``get_input_type`` method.
This method should return the value defined in the ``INPUT_TYPE`` of the child classes.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer``,
containing only a ``INPUT_TYPE`` attribute.
Output:
- the string stored in the ``INPUT_TYPE`` attribute.
"""
# Setup
class Dummy(BaseTransformer):
INPUT_TYPE = 'categorical'
# Run
input_type = Dummy.get_input_type()
# Assert
assert input_type == 'categorical'
def test__add_prefix_none(self):
"""Test the ``_add_prefix`` method when passed a ``None``.
The method should return ``None`` when ``None`` is passed as an argument.
Input:
- a None value.
Output:
- a None value.
"""
# Setup
dictionary = None
base_transformer = BaseTransformer()
# Run
output = base_transformer._add_prefix(dictionary)
# Assert
assert output is None
def test__add_prefix_dictionary(self):
"""Test the ``_add_prefix`` method when passed a dictionary.
When passed a dictionary, the method should add ``column_prefix`` to the
beginning of the keys of the dictionary, separated by a dot.
Setup:
- set the ``column_prefix`` of the ``BaseTransformer`` to ``'column_name'``.
Input:
- a dictionary of strings to strings.
Output:
- the input dictionary with ``column_prefix`` added to the beginning of the keys.
"""
# Setup
dictionary = {
'day': 'numerical',
'month': 'categorical',
'year': 'numerical'
}
transformer = BaseTransformer()
transformer.column_prefix = 'column_name'
# Run
output = transformer._add_prefix(dictionary)
# Assert
expected = {
'column_name.day': 'numerical',
'column_name.month': 'categorical',
'column_name.year': 'numerical'
}
assert output == expected
def test_get_output_types(self):
"""Test the ``get_output_types`` method.
Validate that the ``_add_prefix`` method is properly applied to the value stored in the
``OUTPUT_TYPES`` attribute.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer`` where:
- ``column_prefix`` is set to ``'column_name'``.
- ``OUTPUT_TYPES`` is set to dictionary.
Output:
- the dictionary set in ``OUTPUT_TYPES`` with the ``column_prefix`` string
added to the beginning of the keys.
"""
# Setup
class Dummy(BaseTransformer):
column_prefix = 'column_name'
OUTPUT_TYPES = {
'value': 'numerical'
}
dummy_transformer = Dummy()
# Run
output = dummy_transformer.get_output_types()
# Assert
expected = {
'column_name.value': 'numerical'
}
assert output == expected
def test_get_input_columns(self):
"""Test the ``get_input_columns method.
The method should return a list of all the input column names.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer``
and sets the ``columns`` attribute.
Output:
- List matching the list created in the setup.
"""
# Setup
class Dummy(BaseTransformer):
columns = ['col1', 'col2', 'col3']
dummy_transformer = Dummy()
# Run
output = dummy_transformer.get_input_columns()
# Assert
expected = ['col1', 'col2', 'col3']
assert output == expected
def test_get_output_columns(self):
"""Test the ``get_output_columns`` method.
The method should return a list of all the column names created during ``transform``.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer``
and sets the ``column_prefix`` and ``OUTPUT_TYPES`` attributes.
Output:
- A list of each output name with the prefix prepended.
"""
# Setup
class Dummy(BaseTransformer):
column_prefix = 'column_name'
OUTPUT_TYPES = {
'out1': 'numerical',
'out2': 'categorical'
}
dummy_transformer = Dummy()
# Run
output = dummy_transformer.get_output_columns()
# Assert
expected = ['column_name.out1', 'column_name.out2']
assert output == expected
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method properly returns the ``DETERMINISTIC_TRANSFORM`` attribute.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer``, where
``DETERMINISTIC_TRANSFORM`` is set to True.
Output:
- the boolean value stored in ``DETERMINISTIC_TRANSFORM``.
"""
# Setup
class Dummy(BaseTransformer):
DETERMINISTIC_TRANSFORM = True
dummy_transformer = Dummy()
# Run
output = dummy_transformer.is_transform_deterministic()
# Assert
assert output is True
def test_is_reverse_deterministic(self):
"""Test the ``is_reverse_deterministic`` method.
Validate that this method properly returns the ``DETERMINISTIC_REVERSE`` attribute.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer``, where
``DETERMINISTIC_REVERSE`` is set to True.
Output:
- the boolean value stored in ``DETERMINISTIC_REVERSE``.
"""
# Setup
class Dummy(BaseTransformer):
DETERMINISTIC_REVERSE = True
dummy_transformer = Dummy()
# Run
output = dummy_transformer.is_reverse_deterministic()
# Assert
assert output is True
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Validate that this method properly returns the ``COMPOSITION_IS_IDENTITY`` attribute.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer``, where
``COMPOSITION_IS_IDENTITY`` is set to True.
Output:
- the boolean value stored in ``COMPOSITION_IS_IDENTITY``.
"""
# Setup
class Dummy(BaseTransformer):
COMPOSITION_IS_IDENTITY = True
dummy_transformer = Dummy()
# Run
output = dummy_transformer.is_composition_identity()
# Assert
assert output is True
def test_get_next_transformers(self):
"""Test the ``get_next_transformers`` method.
Validate that the ``_add_prefix`` method is properly applied to the value stored in the
``NEXT_TRANSFORMERS`` attribute.
Setup:
- create a ``Dummy`` class which inherits from the ``BaseTransformer`` where:
- ``column_prefix`` is set to a string.
- ``NEXT_TRANSFORMERS`` is set to dictionary.
Output:
- the dictionary set in ``NEXT_TRANSFORMERS`` with the ``column_prefix`` string
added to the beginning of the keys.
"""
# Setup
class Dummy(BaseTransformer):
column_prefix = 'column_name'
NEXT_TRANSFORMERS = {
'value': 'NullTransformer'
}
dummy_transformer = Dummy()
# Run
output = dummy_transformer.get_next_transformers()
# Assert
expected = {
'column_name.value': 'NullTransformer'
}
assert output == expected
def test__store_columns_list(self):
"""Test the ``_store_columns`` method when passed a list.
When the columns are passed as a list, this method should store the passed columns
of the data in the ``columns`` attribute.
Input:
- a data frame.
- a list of a subset of the columns of the dataframe.
Side effects:
- the ``self.columns`` attribute should be set to the list of the passed columns.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
columns = ['a', 'b']
base_transformer = BaseTransformer()
# Run
base_transformer._store_columns(columns, data)
# Assert
assert base_transformer.columns == ['a', 'b']
def test__store_columns_tuple(self):
"""Test the ``_store_columns`` method when passed a tuple.
When the columns are passed as a tuple (and the tuple itself is not a column name), this
method should store the passed columns of the data in the ``columns`` attribute as a list.
Input:
- a data frame.
- a tuple of a subset of the columns of the dataframe.
Side effects:
- the ``self.columns`` attribute should be set to a list of the passed columns.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
columns = ('a', 'b')
base_transformer = BaseTransformer()
# Run
base_transformer._store_columns(columns, data)
stored_columns = base_transformer.columns
# Assert
assert stored_columns == ['a', 'b']
def test__store_columns_tuple_in_the_data(self):
"""Test the ``_store_columns`` method when passed a tuple which exists in the data.
When the columns are passed as a tuple and the tuple itself is a column name, it should
be treated as such, instead of interpreting the elements of the tuple as column names.
Validate that the stored value in the ``columns`` attribute is a list containing
the passed tuple.
Input:
- a data frame.
- a tuple which is the name of a column.
Side effects:
- the ``self.columns`` attribute should be set to a list containing the passed tuple.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
('a', 'b'): [7, 8, 9]
})
columns = ('a', 'b')
base_transformer = BaseTransformer()
# Run
base_transformer._store_columns(columns, data)
stored_columns = base_transformer.columns
# Assert
assert stored_columns == [('a', 'b')]
def test__store_columns_string(self):
"""Test the ``_store_columns`` method when passed a string.
When the columns are passed as a string, it should be treated as the only column
name passed and stored in the ``columns`` attribute as a one element list.
Input:
- a data frame.
- a string with the name of one of the columns of the dataframe.
Side effects:
- the ``self.columns`` attribute should be set to a list containing the passed string.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
columns = 'a'
base_transformer = BaseTransformer()
# Run
base_transformer._store_columns(columns, data)
stored_columns = base_transformer.columns
# Assert
assert stored_columns == ['a']
def test__store_columns_missing(self):
"""Test the ``_store_columns`` method when passed a missing column.
When the passed column does not exist in the dataframe, it should raise a ``KeyError``.
Input:
- a data frame.
- a list of column names, where at least one of the columns is not
present in the dataframe.
Raises:
- ``KeyError``, with the appropriate error message.
"""
# Setup
data = pd.DataFrame()
columns = ['a', 'b']
base_transformer = BaseTransformer()
missing = set(columns) - set(data.columns)
error_msg = f'Columns {missing} were not present in the data.'
# Run / Assert
with pytest.raises(KeyError, match=error_msg):
base_transformer._store_columns(columns, data)
def test__get_columns_data_multiple_columns(self):
"""Test the ``_get_columns_data`` method.
The method should select the passed columns from the passed data.
Input:
- a dataframe.
- a list of a subset of the columns of the dataframe.
Output:
- the passed dataframe, but containing only the passed columns.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
columns = ['a', 'b']
# Run
columns_data = BaseTransformer._get_columns_data(data, columns)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
})
pd.testing.assert_frame_equal(columns_data, expected)
def test__get_columns_data_single_column(self):
"""Test the ``_get_columns_data`` method when passed a sigle column.
The method should select the passed column from the passed data, and convert it
into a pandas series.
Input:
- a dataframe.
- a list of one column from the dataframe.
Output:
- a pandas series, corresponding to the passed column from the dataframe.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
})
columns = ['b']
# Run
columns_data = BaseTransformer._get_columns_data(data, columns)
# Assert
expected = pd.Series([4, 5, 6], name='b')
pd.testing.assert_series_equal(columns_data, expected)
def test__set_columns_data_series(self):
"""Test the ``_set_columns_data`` method.
The method should not reorder the rows from the ``columns_data``
parameter if it is a ``Series`` and the ``data`` has a different index.
Input:
- data will be a DataFrame with a non-sequential index.
- columns_data will be a Series with a sequential index.
- columns will have the column name of the Series.
Expected behavior:
- Data should have the values from columns_data in the same order
as they were in columns_data.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]
}, index=[2, 0, 1])
columns = ['c']
columns_data = pd.Series([7, 8, 9], name='c')
# Run
BaseTransformer._set_columns_data(data, columns_data, columns)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
}, index=[2, 0, 1])
pd.testing.assert_frame_equal(data, expected)
def test__set_columns_data_dataframe(self):
"""Test the ``_set_columns_data`` method.
The method should not reorder the rows from the ``columns_data``
parameter if it is a ``DataFrame`` and the ``data`` has a different index.
Input:
- data will be a DataFrame with a non-sequential index.
- columns_data will be a DataFrame with a sequential index.
- columns will have the column names of the DataFrame.
Expected behavior:
- Data should have the values from columns_data in the same order
as they were in columns_data.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
}, index=[2, 0, 1])
columns = ['c', 'd']
columns_data = pd.DataFrame({
'c': [7, 8, 9],
'd': [10, 11, 12]
})
# Run
BaseTransformer._set_columns_data(data, columns_data, columns)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9],
'd': [10, 11, 12]
}, index=[2, 0, 1])
pd.testing.assert_frame_equal(data, expected)
def test__set_columns_data_1d_array(self):
"""Test the ``_set_columns_data`` method.
The method should not reorder the rows from the ``columns_data``
parameter if it is a 1d array and the ``data`` has a different index.
Input:
- data will be a DataFrame with a non-sequential index.
- columns_data will be a 1d array.
- columns will have the column name of the array.
Expected behavior:
- Data should have the values from columns_data in the same order
as they were in columns_data.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
}, index=[2, 0, 1])
columns = ['c']
columns_data = np.array([7, 8, 9], dtype=np.int64)
# Run
BaseTransformer._set_columns_data(data, columns_data, columns)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]
}, index=[2, 0, 1])
pd.testing.assert_frame_equal(data, expected)
def test__set_columns_data_2d_array(self):
"""Test the ``_set_columns_data`` method.
The method should not reorder the rows from the ``columns_data``
parameter if it is a 2d array and the ``data`` has a different index.
Input:
- data will be a DataFrame with a non-sequential index.
- columns_data will be a 2d array with a sequential index.
- columns will have the column names for the 2d array.
Expected behavior:
- Data should have the values from columns_data in the same order
as they were in columns_data.
"""
# Setup
data = pd.DataFrame({
'a': [1, 2, 3]
}, index=[2, 0, 1])
columns = ['b', 'c']
columns_data = np.array([
[7, 1],
[8, 5],
[9, 9]
], dtype=np.int64)
# Run
BaseTransformer._set_columns_data(data, columns_data, columns)
# Assert
expected = pd.DataFrame({
'a': [1, 2, 3],
'b': [7, 8, 9],
'c': [1, 5, 9]
}, index=[2, 0, 1])
| pd.testing.assert_frame_equal(data, expected) | pandas.testing.assert_frame_equal |
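# Hedged, pandas-only sketch (not part of the test suite above) of the behaviour
# the ``_set_columns_data`` tests verify: assigning raw values writes rows
# positionally, whereas index alignment would reorder them to match the index.
def _positional_assignment_sketch():
    data = pd.DataFrame({'a': [1, 2, 3]}, index=[2, 0, 1])
    new_column = pd.Series([7, 8, 9], name='c')
    data['c'] = new_column.to_numpy()           # positional: rows stay 7, 8, 9
    aligned = new_column.reindex(data.index)    # index-aligned: rows become 9, 7, 8
    assert data['c'].tolist() == [7, 8, 9]
    assert aligned.tolist() == [9, 7, 8]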
"""Data Updating Utility (:mod:`bucky.util.update_data_repos`).
A utility for fetching updated data for mobility and case data from public repositories.
This module pulls from public git repositories and preprocessed the
data if necessary. For case data, unallocated or unassigned cases are
distributed as necessary.
"""
import logging
import os
import ssl
import subprocess
import urllib.request
import numpy as np
import pandas as pd
import tqdm
from .read_config import bucky_cfg
# Options for correcting territory data
TERRITORY_DATA = bucky_cfg["data_dir"] + "/population/territory_pop.csv"
ADD_AMERICAN_SAMOA = False
# CSSE UIDs for Michigan prison information
MI_PRISON_UIDS = [84070004, 84070005]
# CSSE IDs for Utah local health districts
UT_LHD_UIDS = [84070015, 84070016, 84070017, 84070018, 84070019, 84070020]
def get_timeseries_data(col_name, filename, fips_key="FIPS", is_csse=True):
"""Transforms a historical data file to a dataframe with FIPs, date, and case or death data.
Parameters
----------
col_name : str
Column name to extract from data.
filename : str
Location of filename to read.
fips_key : str, optional
Key used in file for indicating county-level field.
is_csse : bool, optional
Indicates whether the file is CSSE data. If True, certain areas
without FIPS are included.
Returns
-------
df : pandas.DataFrame
Dataframe with the historical data indexed by FIPS, date
"""
# Read file
df = pd.read_csv(filename)
# CSSE-specific correction
if is_csse:
# Michigan prisons have no FIPS, replace with their UID to be processed later
mi_data = df.loc[df["UID"].isin(MI_PRISON_UIDS)]
mi_data = mi_data.assign(FIPS=mi_data["UID"])
df.loc[mi_data.index] = mi_data.values # noqa: PD011
# Utah health districts have NAN FIPS, replace with UID
utah_local_dist_data = df.loc[df["UID"].isin(UT_LHD_UIDS)]
utah_local_dist_data = utah_local_dist_data.assign(FIPS=utah_local_dist_data["UID"])
df.loc[utah_local_dist_data.index] = utah_local_dist_data.values
# Get dates and FIPS columns only
cols = list(df.columns)
idx = cols.index("1/22/20")
# Keep columns after index
keep_cols = cols[idx:]
# Add FIPS
keep_cols.append(fips_key)
# Drop other columns
df = df[keep_cols]
# Reindex and stack
df = df.set_index(fips_key)
# Stack
df = df.stack().reset_index()
# Replace column names
df.columns = ["FIPS", "date", col_name]
return df
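# Hedged sketch of the reshape performed by get_timeseries_data, on a tiny
# synthetic frame (the FIPS codes and counts below are made up, not CSSE data).
def _wide_to_long_sketch():
    wide = pd.DataFrame({"FIPS": [1001, 1003], "1/22/20": [0, 1], "1/23/20": [2, 3]})
    long_df = wide.set_index("FIPS").stack().reset_index()
    long_df.columns = ["FIPS", "date", "cumulative_reported_cases"]
    return long_df  # one row per (FIPS, date) pair, matching the function's output shape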
def distribute_unallocated_csse(confirmed_file, deaths_file, hist_df):
"""Distributes unallocated historical case and deaths data from CSSE.
JHU CSSE data contains state-level unallocated data, indicated with
"Unassigned" or "Out of" for each state. This function distributes
these unallocated cases based on the proportion of cases in each
county relative to the state.
Parameters
----------
confirmed_file : str
filename of CSSE confirmed data
deaths_file : str
filename of CSSE death data
hist_df : pandas.DataFrame
current historical DataFrame containing confirmed and death data
indexed by date and FIPS code
Returns
-------
hist_df : pandas.DataFrame
modified historical DataFrame with cases and deaths distributed
"""
hist_df = hist_df.reset_index()
if "index" in hist_df.columns:
hist_df = hist_df.drop(columns=["index"])
hist_df = hist_df.assign(state_fips=hist_df["FIPS"] // 1000)
hist_df = hist_df.set_index(["date", "FIPS"])
# Read cases and deaths files
case_df = pd.read_csv(confirmed_file)
deaths_df = pd.read_csv(deaths_file)
# Get unassigned and 'out of X'
cases_unallocated = case_df.loc[
(case_df["Combined_Key"].str.contains("Out of")) | (case_df["Combined_Key"].str.contains("Unassigned"))
]
cases_unallocated = cases_unallocated.assign(state_fips=cases_unallocated["FIPS"].astype(str).str[3:].astype(float))
deaths_unallocated = deaths_df.loc[
(deaths_df["Combined_Key"].str.contains("Out of")) | (deaths_df["Combined_Key"].str.contains("Unassigned"))
]
deaths_unallocated = deaths_unallocated.assign(
state_fips=deaths_unallocated["FIPS"].astype(str).str[3:].astype(float),
)
# Sum unassigned and 'out of X'
extra_cases = cases_unallocated.groupby("state_fips").sum()
extra_deaths = deaths_unallocated.groupby("state_fips").sum()
extra_cases = extra_cases.drop(
columns=[
"UID",
"code3",
"FIPS",
"Lat",
"Long_",
],
)
extra_deaths = extra_deaths.drop(
columns=[
"UID",
"Population",
"code3",
"FIPS",
"Lat",
"Long_",
],
)
# Reformat dates to match processed data's format
extra_cases.columns = pd.to_datetime(extra_cases.columns)
extra_deaths.columns = pd.to_datetime(extra_deaths.columns)
# Iterate over states in historical data
for state_fips in tqdm.tqdm(
extra_cases.index.array,
desc="Distributing unallocated state data",
dynamic_ncols=True,
):
# Get extra cases and deaths
state_extra_cases = extra_cases.xs(state_fips)
state_extra_deaths = extra_deaths.xs(state_fips)
# Get historical data
state_df = hist_df.loc[hist_df["state_fips"] == state_fips]
state_df = state_df.reset_index()
state_confirmed = state_df[["FIPS", "date", "cumulative_reported_cases"]]
state_confirmed = state_confirmed.pivot(index="FIPS", columns="date", values="cumulative_reported_cases")
frac_df = state_confirmed / state_confirmed.sum()
frac_df = frac_df.replace(np.nan, 0)
# Distribute cases and deaths based on this matrix
dist_cases = frac_df.mul(state_extra_cases, axis="columns").T.stack()
dist_deaths = frac_df.mul(state_extra_deaths, axis="columns").T.stack()
# Index historical data
state_df = state_df.set_index(["date", "FIPS"])
tmp = dist_deaths.to_frame(name="cumulative_deaths")
tmp["cumulative_reported_cases"] = dist_cases
state_df += tmp
hist_df.loc[state_df.index] = state_df.values
hist_df = hist_df.drop(columns=["state_fips"])
return hist_df
def distribute_data_by_population(total_df, dist_vect, data_to_dist, replace):
"""Distributes data by population across a state or territory.
Parameters
----------
total_df : pandas.DataFrame
DataFrame containing confirmed and death data indexed by date and
FIPS code
dist_vect : pandas.DataFrame
Population data for each county as proportion of total state
population, indexed by FIPS code
data_to_dist: pandas.DataFrame
        Data to distribute, indexed by date
replace : bool
If true, distributed values overwrite current historical data in
DataFrame. If false, distributed values are added to current data
Returns
-------
total_df : pandas.DataFrame
Modified input dataframe with distributed data
"""
# Create temporary dataframe and merge
tmp = total_df.reset_index()
tmp = tmp.merge(dist_vect, on="FIPS")
tmp = tmp.merge(data_to_dist, on="date")
# Use population fraction to scale
if replace:
tmp = tmp.assign(cumulative_reported_cases=tmp["pop_fraction"] * tmp["cumulative_reported_cases_y"])
tmp = tmp.assign(cumulative_deaths=tmp["pop_fraction"] * tmp["cumulative_deaths_y"])
else:
tmp = tmp.assign(
cumulative_reported_cases=tmp["cumulative_reported_cases_x"]
+ tmp["pop_fraction"] * tmp["cumulative_reported_cases_y"],
)
tmp = tmp.assign(
cumulative_deaths=tmp["cumulative_deaths_x"] + tmp["pop_fraction"] * tmp["cumulative_deaths_y"],
)
# Discard merge columns
tmp = tmp[["FIPS", "date", "cumulative_reported_cases", "cumulative_deaths"]]
tmp = tmp.set_index(["FIPS", "date"])
total_df.loc[tmp.index] = tmp.values
return total_df
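# Hedged numeric sketch (made-up numbers) of the weighting used above: two
# counties holding 75% / 25% of the state population split 100 unallocated
# cases into 75 and 25 before being written back into the history frame.
def _population_split_sketch():
    pop_fraction = pd.Series({1001: 0.75, 1003: 0.25}, name="pop_fraction")
    unallocated_cases = 100
    return pop_fraction * unallocated_cases  # 1001 -> 75.0, 1003 -> 25.0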
def get_county_population_data(csse_deaths_file, county_fips):
"""Uses JHU CSSE deaths file to get county population data as as fraction of population across list of counties.
Parameters
----------
csse_deaths_file : str
filename of CSSE deaths file
county_fips: numpy.ndarray
list of FIPS to return population data for
Returns
-------
population_df: pandas.DataFrame
DataFrame with population fraction data indexed by FIPS
"""
# Use CSSE Deaths file to get population values by FIPS
df = pd.read_csv(csse_deaths_file)
population_df = df.loc[df["FIPS"].isin(county_fips)][["FIPS", "Population"]].set_index("FIPS")
population_df = population_df.assign(pop_fraction=population_df["Population"] / population_df["Population"].sum())
population_df = population_df.drop(columns=["Population"])
return population_df
def distribute_utah_data(df, csse_deaths_file):
"""Distributes Utah case data for local health departments spanning multiple counties.
Utah has 13 local health districts, six of which span multiple counties. This
function distributes those cases and deaths by population across their constituent
counties.
Parameters
----------
df : pandas.DataFrame
DataFrame containing historical data indexed by FIPS and date
csse_deaths_file : str
File location of CSSE deaths file
Returns
-------
df : pandas.DataFrame
Modified DataFrame containing corrected Utah historical data
indexed by FIPS and date
"""
local_districts = {
# Box Elder, Cache, Rich
84070015: {"name": "Bear River, Utah, US", "FIPS": [49003, 49005, 49033]},
# Juab, Millard, Piute, Sevier, Wayne, Sanpete
84070016: {"name": "Central Utah, Utah, US", "FIPS": [49023, 49027, 49031, 49041, 49055, 49039]},
# Carbon, Emery, Grand
84070017: {"name": "Southeast Utah, Utah, US", "FIPS": [49007, 49015, 49019]},
# Garfield, Iron, Kane, Washington, Beaver
84070018: {"name": "Southwest Utah, Utah, US", "FIPS": [49017, 49021, 49025, 49053, 49001]},
# Daggett, Duchesne, Uintah
84070019: {"name": "TriCounty, Utah, Utah, US", "FIPS": [49009, 49013, 49047]},
        # Weber, Morgan
84070020: {"name": "Weber-Morgan, Utah, US", "FIPS": [49057, 49029]},
}
for district_uid, local_district in local_districts.items():
# Get list of fips
fips_list = local_district["FIPS"]
# Deaths file has population data
county_pop = get_county_population_data(csse_deaths_file, fips_list)
# Get district data
district_data = df.loc[district_uid]
        # Distribute district totals to constituent counties by population (replace=True)
df = distribute_data_by_population(df, county_pop, district_data, True)
# Drop health districts data from dataframe
df = df.loc[~df.index.get_level_values(0).isin(UT_LHD_UIDS)]
return df
def distribute_mdoc(df, csse_deaths_file):
"""Distributes Michigan Department of Corrections data across Michigan counties by population.
Parameters
----------
df : pandas.DataFrame
Current historical DataFrame indexed by FIPS and date, which
includes MDOC and FCI data
csse_deaths_file : str
File location of CSSE deaths file (contains population data)
Returns
-------
df : pandas.DataFrame
Modified historical dataframe with Michigan prison data distributed
and added to Michigan data
"""
# Get Michigan county populations
tmp = df.reset_index()
michigan_counties = tmp.loc[tmp["FIPS"] // 1000 == 26]["FIPS"].unique()
michigan_pop = get_county_population_data(csse_deaths_file, michigan_counties)
# Get prison data
mdoc_data = df.xs(MI_PRISON_UIDS[0], level=0)
fci_data = df.xs(MI_PRISON_UIDS[1], level=0)
# Sum and distribute
mi_unallocated = mdoc_data + fci_data
# Add to Michigan data, do not replace
df = distribute_data_by_population(df, michigan_pop, mi_unallocated, False)
# Drop prison data from dataframe
df = df.loc[~df.index.get_level_values(0).isin(MI_PRISON_UIDS)]
return df
def distribute_territory_data(df, add_american_samoa):
"""Distributes territory-wide case and death data for territories.
Uses county population to distribute cases for US Virgin Islands,
Guam, and CNMI. Optionally adds a single case to the most populous
American Samoan county.
Parameters
----------
df : pandas.DataFrame
Current historical DataFrame indexed by FIPS and date, which
includes territory-wide case and death data
add_american_samoa: bool
If true, adds 1 case to American Samoa
Returns
-------
df : pandas.DataFrame
Modified historical dataframe with territory-wide data
distributed to counties
"""
# Get population data from file
age_df = | pd.read_csv(TERRITORY_DATA, index_col="fips") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
df = | pd.read_csv("Heart.csv") | pandas.read_csv |
import os
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import matplotlib.dates as mdates
from datetime import date, timedelta, datetime
import seaborn as sns
import geopandas as gpd
from shapely.geometry import mapping, Point, Polygon
mpl.rcParams['pdf.fonttype'] = 42
fsm_path = '/Volumes/fsmresfiles/PrevMed/Covid-19-Modeling'
idph_data_path = os.path.join(fsm_path, 'IDPH line list',)
ltcf_data_path = os.path.join(fsm_path, 'IDPH LTCF')
cleaned_line_list_fname = os.path.join(idph_data_path,
'LL_200701_JGcleaned_no_race.csv')
ltcf_fname = os.path.join(ltcf_data_path, 'Modelors LTC Report_200703.xlsx')
cleaned_ltcf_fname = os.path.join(ltcf_data_path, 'Modelors LTC Report_200703_first_specimen.csv')
box_data_path = '/Users/jlg1657/Box/NU-malaria-team/data/covid_IDPH'
project_path = '/Users/jlg1657/Box/NU-malaria-team/projects/covid_chicago'
plot_path = os.path.join(project_path, 'Plots + Graphs')
shp_path = os.path.join(box_data_path, 'shapefiles')
def load_daily_ll_deaths() :
df = | pd.read_csv(cleaned_line_list_fname) | pandas.read_csv |
"""
Collects data for the discovery cohort.
"""
from click import *
from logging import *
import janitor
import pandas as pd
import re
@command()
@option(
"--localization-input",
required=True,
help="the CSV file to load localizations from",
)
@option(
"--medication-input",
required=True,
help="the Feather file to load medications from",
)
@option(
"--joint-injection-input",
required=True,
help="the Feather file to load joint injections from",
)
@option("--output", required=True, help="the Feather file to output data to")
@option(
"--visit-id",
type=IntRange(1),
required=True,
multiple=True,
help="the visit IDs to consider",
)
def main(localization_input, medication_input, joint_injection_input, output, visit_id):
basicConfig(level=DEBUG)
# Load data.
info("Loading localizations")
X_localizations = pd.read_csv(localization_input, index_col="subject_id")
debug(f"Result: {X_localizations.shape}")
info("Loading medications")
X_medications = | pd.read_feather(medication_input) | pandas.read_feather |
import os
import pandas as pd
import arff
import numpy as np
from functools import reduce
import sqlite3
import logging
from libs.planet_kaggle import to_multi_label_dict, get_file_count, enrich_with_feature_encoding, featurise_images, generate_validation_files
import tensorflow as tf
from keras.applications.resnet50 import ResNet50
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
_FRAUD_PATH = 'fraud_detection', 'credit_card_fraud_kaggle', 'creditcard.csv'
_IOT_PATH = 'iot', 'sensor_stream_berkeley', 'sensor.arff'
_AIRLINE_PATH = 'airline', 'airline_14col.data'
_FOOTBALL_PATH = 'football', 'database.sqlite'
_BCI_PATH = 'bci', 'data.npz'
_HIGGS_PATH = 'higgs', 'HIGGS.csv'
_KAGGLE_ROOT = 'planet'
_PLANET_KAGGLE_LABEL_CSV = 'train_v2.csv'
_PLANET_KAGGLE_TRAIN_DIR = 'train-jpg'
_PLANET_KAGGLE_VAL_DIR = 'validate-jpg'
def _get_datapath():
try:
datapath = os.environ['MOUNT_POINT']
except KeyError:
logger.info("MOUNT_POINT not found in environment. Defaulting to /fileshare")
datapath = '/fileshare'
return datapath
def load_fraud():
""" Loads the credit card fraud data
    The dataset contains transactions made by credit cards in September 2013 by European cardholders.
This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions.
    The dataset is highly unbalanced; the positive class (frauds) accounts for 0.172% of all transactions.
It contains only numerical input variables which are the result of a PCA transformation.
Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about
the data.
Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed
with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first
transaction in the dataset.
    The feature 'Amount' is the transaction amount; this feature can be used for example-dependent cost-sensitive learning.
Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve
(AUPRC).
Confusion matrix accuracy is not meaningful for unbalanced classification.
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group
(http://mlg.ulb.ac.be) of ULB (Universite Libre de Bruxelles) on big data mining and fraud detection. More details
on current and past projects on related topics are available on http://mlg.ulb.ac.be/BruFence
and http://mlg.ulb.ac.be/ARTML
Please cite: <NAME>, <NAME>, <NAME> and <NAME>. Calibrating Probability with
Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
Returns
-------
pandas DataFrame
"""
return pd.read_csv(reduce(os.path.join, _FRAUD_PATH, _get_datapath()))
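# Hedged sketch of the AUPRC evaluation recommended in the docstring above; it
# assumes scikit-learn is installed (not a dependency declared in this module)
# and that y_true / y_score come from a model fit against the 'Class' column.
def _auprc(y_true, y_score):
    from sklearn.metrics import average_precision_score
    return average_precision_score(y_true, y_score)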
def load_iot():
""" Loads iot data
Sensor stream contains information (temperature, humidity, light, and sensor voltage) collected from 54 sensors deployed
in Intel Berkeley Research Lab. The whole stream contains consecutive information recorded over a 2 months
period (1 reading per 1-3 minutes). I used the sensor ID as the class label, so the learning task of the stream is
to correctly identify the sensor ID (1 out of 54 sensors) purely based on the sensor data and the corresponding recording
time.
While the data stream flow over time, so does the concepts underlying the stream. For example, the lighting during
the working hours is generally stronger than the night, and the temperature of specific sensors (conference room)
may regularly rise during the meetings.
Returns
-------
pandas DataFrame
"""
dataset = arff.load(open(reduce(os.path.join, _IOT_PATH, _get_datapath())))
columns = [i[0] for i in dataset['attributes']]
return pd.DataFrame(dataset['data'], columns=columns)
def load_airline():
""" Loads airline data
The dataset consists of a large amount of records, containing flight arrival and departure details for all the
commercial flights within the USA, from October 1987 to April 2008. Its size is around 116 million records and
5.76 GB of memory.
There are 13 attributes, each represented in a separate column: Year (1987-2008), Month (1-12), Day of Month (1-31),
Day of Week (1:Monday - 7:Sunday), CRS Departure Time (local time as hhmm), CRS Arrival Time (local time as hhmm),
Unique Carrier, Flight Number, Actual Elapsed Time (in min), Origin, Destination, Distance (in miles), and Diverted
(1=yes, 0=no).
The target attribute is Arrival Delay, it is a positive or negative value measured in minutes.
Link to the source: http://kt.ijs.si/elena_ikonomovska/data.html
Returns
-------
pandas DataFrame
"""
cols = ['Year', 'Month', 'DayofMonth', 'DayofWeek', 'CRSDepTime', 'CRSArrTime', 'UniqueCarrier', 'FlightNum', 'ActualElapsedTime', 'Origin', 'Dest', 'Distance', 'Diverted', 'ArrDelay']
return pd.read_csv(reduce(os.path.join, _AIRLINE_PATH, _get_datapath()), names=cols)
def load_football():
""" Loads football data
Dataset of football stats. +25,000 matches, +10,000 players from 11 European Countries with their lead championship
Seasons 2008 to 2016. It also contains players attributes sourced from EA Sports' FIFA video game series,
including the weekly updates, team line up with squad formation (X, Y coordinates), betting odds from up to 10
providers and detailed match events (goal types, possession, corner, cross, fouls, cards etc...) for +10,000 matches.
The meaning of the columns can be found here: http://www.football-data.co.uk/notes.txt
Number of attributes in each table (size of the dataframe):
countries (11, 2)
matches (25979, 115)
leagues (11, 3)
teams (299, 5)
players (183978, 42)
Link to the source: https://www.kaggle.com/hugomathien/soccer
Returns
-------
list of pandas DataFrame
"""
database_path = reduce(os.path.join, _FOOTBALL_PATH, _get_datapath())
with sqlite3.connect(database_path) as con:
countries = | pd.read_sql_query("SELECT * from Country", con) | pandas.read_sql_query |
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.topology import Molecule, Topology
from biopandas.pdb import PandasPdb
import matplotlib.pyplot as plt
from operator import itemgetter
from mendeleev import element
from simtk.openmm import app
from scipy import optimize
import subprocess as sp
from sys import stdout
import pandas as pd
import numpy as np
import statistics
import itertools
import parmed
import pickle
import shutil
import simtk
import scipy
import time
import math
import sys
import ast
import re
import os
BOHRS_PER_ANGSTROM = 0.529
HARTREE_PER_KCAL_MOL = 627.509391
#kcal/mol * A^2 to kJ/mol * nm^2
KCAL_MOL_PER_KJ_MOL = 4.184
ANGSTROMS_PER_NM = 10.0
RADIANS_PER_DEGREE = np.pi / 180.0
method_basis_scale_dict = {
"HF STO-3G": 0.817,
"HF 3-21G": 0.906,
"HF 3-21G*": 0.903,
"HF 6-31G": 0.903,
"HF 6-31G*": 0.899,
"HF 6-31G**": 0.903,
"HF 6-31+G**": 0.904,
"HF 6-311G*": 0.904,
"HF 6-311G**": 0.909,
"HF TZVP": 0.909,
"HF cc-pVDZ": 0.908,
"HF cc-pVTZ": 0.91,
"HF cc-pVQZ": 0.908,
"HF aug-cc-pVDZ": 0.911,
"HF aug-cc-pVTZ": 0.91,
"HF aug-cc-pVQZ": 0.909,
"HF daug-cc-pVDZ": 0.912,
"HF daug-cc-pVTZ": 0.905,
"ROHF 3-21G": 0.907,
"ROHF 3-21G*": 0.909,
"ROHF 6-31G": 0.895,
"ROHF 6-31G*": 0.89,
"ROHF 6-31G**": 0.855,
"ROHF 6-31+G**": 0.856,
"ROHF 6-311G*": 0.856,
"ROHF 6-311G**": 0.913,
"ROHF cc-pVDZ": 0.861,
"ROHF cc-pVTZ": 0.901,
"LSDA STO-3G": 0.896,
"LSDA 3-21G": 0.984,
"LSDA 3-21G*": 0.982,
"LSDA 6-31G": 0.98,
"LSDA 6-31G*": 0.981,
"LSDA 6-31G**": 0.981,
"LSDA 6-31+G**": 0.985,
"LSDA 6-311G*": 0.984,
"LSDA 6-311G**": 0.988,
"LSDA TZVP": 0.988,
"LSDA cc-pVDZ": 0.989,
"LSDA cc-pVTZ": 0.989,
"LSDA aug-cc-pVDZ": 0.989,
"LSDA aug-cc-pVTZ": 0.991,
"BLYP STO-3G": 0.925,
"BLYP 3-21G": 0.995,
"BLYP 3-21G*": 0.994,
"BLYP 6-31G": 0.992,
"BLYP 6-31G*": 0.992,
"BLYP 6-31G**": 0.992,
"BLYP 6-31+G**": 0.995,
"BLYP 6-311G*": 0.998,
"BLYP 6-311G**": 0.996,
"BLYP TZVP": 0.998,
"BLYP cc-pVDZ": 1.002,
"BLYP cc-pVTZ": 0.997,
"BLYP aug-cc-pVDZ": 0.998,
"BLYP aug-cc-pVTZ": 0.997,
"B1B95 STO-3G": 0.883,
"B1B95 3-21G": 0.957,
"B1B95 3-21G*": 0.955,
"B1B95 6-31G": 0.954,
"B1B95 6-31G*": 0.949,
"B1B95 6-31G**": 0.955,
"B1B95 6-31+G**": 0.957,
"B1B95 6-311G*": 0.959,
"B1B95 6-311G**": 0.96,
"B1B95 TZVP": 0.957,
"B1B95 cc-pVDZ": 0.961,
"B1B95 cc-pVTZ": 0.957,
"B1B95 aug-cc-pVDZ": 0.958,
"B1B95 aug-cc-pVTZ": 0.959,
"B3LYP STO-3G": 0.892,
"B3LYP 3-21G": 0.965,
"B3LYP 3-21G*": 0.962,
"B3LYP 6-31G": 0.962,
"B3LYP 6-31G*": 0.96,
"B3LYP 6-31G**": 0.961,
"B3LYP 6-31+G**": 0.964,
"B3LYP 6-311G*": 0.966,
"B3LYP 6-311G**": 0.967,
"B3LYP TZVP": 0.965,
"B3LYP cc-pVDZ": 0.97,
"B3LYP cc-pVTZ": 0.967,
"B3LYP cc-pVQZ": 0.969,
"B3LYP aug-cc-pVDZ": 0.97,
"B3LYP aug-cc-pVTZ": 0.968,
"B3LYP aug-cc-pVQZ": 0.969,
"B3PW91 STO-3G": 0.885,
"B3PW91 3-21G": 0.961,
"B3PW91 3-21G*": 0.959,
"B3PW91 6-31G": 0.958,
"B3PW91 6-31G*": 0.957,
"B3PW91 6-31G**": 0.958,
"B3PW91 6-31+G**": 0.96,
"B3PW91 6-311G*": 0.963,
"B3PW91 6-311G**": 0.963,
"B3PW91 TZVP": 0.964,
"B3PW91 cc-pVDZ": 0.965,
"B3PW91 cc-pVTZ": 0.962,
"B3PW91 aug-cc-pVDZ": 0.965,
"B3PW91 aug-cc-pVTZ": 0.965,
"mPW1PW91 STO-3G": 0.879,
"mPW1PW91 3-21G": 0.955,
"mPW1PW91 3-21G*": 0.95,
"mPW1PW91 6-31G": 0.947,
"mPW1PW91 6-31G*": 0.948,
"mPW1PW91 6-31G**": 0.952,
"mPW1PW91 6-31+G**": 0.952,
"mPW1PW91 6-311G*": 0.954,
"mPW1PW91 6-311G**": 0.957,
"mPW1PW91 TZVP": 0.954,
"mPW1PW91 cc-pVDZ": 0.958,
"mPW1PW91 cc-pVTZ": 0.959,
"mPW1PW91 aug-cc-pVDZ": 0.958,
"mPW1PW91 aug-cc-pVTZ": 0.958,
"PBEPBE STO-3G": 0.914,
"PBEPBE 3-21G": 0.991,
"PBEPBE 3-21G*": 0.954,
"PBEPBE 6-31G": 0.986,
"PBEPBE 6-31G*": 0.986,
"PBEPBE 6-31G**": 0.986,
"PBEPBE 6-31+G**": 0.989,
"PBEPBE 6-311G*": 0.99,
"PBEPBE 6-311G**": 0.991,
"PBEPBE TZVP": 0.989,
"PBEPBE cc-pVDZ": 0.994,
"PBEPBE cc-pVTZ": 0.993,
"PBEPBE aug-cc-pVDZ": 0.994,
"PBEPBE aug-cc-pVTZ": 0.994,
"PBE1PBE STO-3G": 0.882,
"PBE1PBE 3-21G": 0.96,
"PBE1PBE 3-21G*": 0.96,
"PBE1PBE 6-31G": 0.956,
"PBE1PBE 6-31G*": 0.95,
"PBE1PBE 6-31G**": 0.953,
"PBE1PBE 6-31+G**": 0.955,
"PBE1PBE 6-311G*": 0.959,
"PBE1PBE 6-311G**": 0.959,
"PBE1PBE TZVP": 0.96,
"PBE1PBE cc-pVDZ": 0.962,
"PBE1PBE cc-pVTZ": 0.961,
"PBE1PBE aug-cc-pVDZ": 0.962,
"PBE1PBE aug-cc-pVTZ": 0.962,
"HSEh1PBE STO-3G": 0.883,
"HSEh1PBE 3-21G": 0.963,
"HSEh1PBE 3-21G*": 0.96,
"HSEh1PBE 6-31G": 0.957,
"HSEh1PBE 6-31G*": 0.951,
"HSEh1PBE 6-31G**": 0.954,
"HSEh1PBE 6-31+G**": 0.955,
"HSEh1PBE 6-311G*": 0.96,
"HSEh1PBE 6-311G**": 0.96,
"HSEh1PBE TZVP": 0.96,
"HSEh1PBE cc-pVDZ": 0.962,
"HSEh1PBE cc-pVTZ": 0.961,
"HSEh1PBE aug-cc-pVDZ": 0.962,
"HSEh1PBE aug-cc-pVTZ": 0.962,
"TPSSh 3-21G": 0.969,
"TPSSh 3-21G*": 0.966,
"TPSSh 6-31G": 0.962,
"TPSSh 6-31G*": 0.959,
"TPSSh 6-31G**": 0.959,
"TPSSh 6-31+G**": 0.963,
"TPSSh 6-311G*": 0.963,
"TPSSh TZVP": 0.964,
"TPSSh cc-pVDZ": 0.972,
"TPSSh cc-pVTZ": 0.968,
"TPSSh aug-cc-pVDZ": 0.967,
"TPSSh aug-cc-pVTZ": 0.965,
"B97D3 3-21G": 0.983,
"B97D3 6-31G*": 0.98,
"B97D3 6-31+G**": 0.983,
"B97D3 6-311G**": 0.986,
"B97D3 TZVP": 0.986,
"B97D3 cc-pVDZ": 0.992,
"B97D3 cc-pVTZ": 0.986,
"B97D3 aug-cc-pVTZ": 0.985,
"MP2 STO-3G": 0.872,
"MP2 3-21G": 0.955,
"MP2 3-21G*": 0.951,
"MP2 6-31G": 0.957,
"MP2 6-31G*": 0.943,
"MP2 6-31G**": 0.937,
"MP2 6-31+G**": 0.941,
"MP2 6-311G*": 0.95,
"MP2 6-311G**": 0.95,
"MP2 TZVP": 0.948,
"MP2 cc-pVDZ": 0.953,
"MP2 cc-pVTZ": 0.95,
"MP2 cc-pVQZ": 0.948,
"MP2 aug-cc-pVDZ": 0.959,
"MP2 aug-cc-pVTZ": 0.953,
"MP2 aug-cc-pVQZ": 0.95,
"MP2=FULL STO-3G": 0.889,
"MP2=FULL 3-21G": 0.955,
"MP2=FULL 3-21G*": 0.948,
"MP2=FULL 6-31G": 0.95,
"MP2=FULL 6-31G*": 0.942,
"MP2=FULL 6-31G**": 0.934,
"MP2=FULL 6-31+G**": 0.939,
"MP2=FULL 6-311G*": 0.947,
"MP2=FULL 6-311G**": 0.949,
"MP2=FULL TZVP": 0.953,
"MP2=FULL cc-pVDZ": 0.95,
"MP2=FULL cc-pVTZ": 0.949,
"MP2=FULL cc-pVQZ": 0.957,
"MP2=FULL aug-cc-pVDZ": 0.969,
"MP2=FULL aug-cc-pVTZ": 0.951,
"MP2=FULL aug-cc-pVQZ": 0.956,
"MP3 STO-3G": 0.894,
"MP3 3-21G": 0.968,
"MP3 3-21G*": 0.965,
"MP3 6-31G": 0.966,
"MP3 6-31G*": 0.939,
"MP3 6-31G**": 0.935,
"MP3 6-31+G**": 0.931,
"MP3 TZVP": 0.935,
"MP3 cc-pVDZ": 0.948,
"MP3 cc-pVTZ": 0.945,
"MP3=FULL 6-31G*": 0.938,
"MP3=FULL 6-31+G**": 0.932,
"MP3=FULL TZVP": 0.934,
"MP3=FULL cc-pVDZ": 0.94,
"MP3=FULL cc-pVTZ": 0.933,
"B2PLYP 6-31G*": 0.949,
"B2PLYP 6-31+G**": 0.952,
"B2PLYP TZVP": 0.954,
"B2PLYP cc-pVDZ": 0.958,
"B2PLYP cc-pVTZ": 0.959,
"B2PLYP cc-pVQZ": 0.957,
"B2PLYP aug-cc-pVTZ": 0.961,
"B2PLYP=FULL 3-21G": 0.952,
"B2PLYP=FULL 6-31G*": 0.948,
"B2PLYP=FULL 6-31+G**": 0.951,
"B2PLYP=FULL TZVP": 0.954,
"B2PLYP=FULL cc-pVDZ": 0.959,
"B2PLYP=FULL cc-pVTZ": 0.956,
"B2PLYP=FULL aug-cc-pVDZ": 0.962,
"B2PLYP=FULL aug-cc-pVTZ": 0.959,
"CID 3-21G": 0.932,
"CID 3-21G*": 0.931,
"CID 6-31G": 0.935,
"CID 6-31G*": 0.924,
"CID 6-31G**": 0.924,
"CID 6-31+G**": 0.924,
"CID 6-311G*": 0.929,
"CID cc-pVDZ": 0.924,
"CID cc-pVTZ": 0.927,
"CISD 3-21G": 0.941,
"CISD 3-21G*": 0.934,
"CISD 6-31G": 0.938,
"CISD 6-31G*": 0.926,
"CISD 6-31G**": 0.918,
"CISD 6-31+G**": 0.922,
"CISD 6-311G*": 0.925,
"CISD cc-pVDZ": 0.922,
"CISD cc-pVTZ": 0.93,
"QCISD 3-21G": 0.969,
"QCISD 3-21G*": 0.961,
"QCISD 6-31G": 0.964,
"QCISD 6-31G*": 0.952,
"QCISD 6-31G**": 0.941,
"QCISD 6-31+G**": 0.945,
"QCISD 6-311G*": 0.957,
"QCISD 6-311G**": 0.954,
"QCISD TZVP": 0.955,
"QCISD cc-pVDZ": 0.959,
"QCISD cc-pVTZ": 0.956,
"QCISD aug-cc-pVDZ": 0.969,
"QCISD aug-cc-pVTZ": 0.962,
"CCD 3-21G": 0.972,
"CCD 3-21G*": 0.957,
"CCD 6-31G": 0.96,
"CCD 6-31G*": 0.947,
"CCD 6-31G**": 0.938,
"CCD 6-31+G**": 0.942,
"CCD 6-311G*": 0.955,
"CCD 6-311G**": 0.955,
"CCD TZVP": 0.948,
"CCD cc-pVDZ": 0.957,
"CCD cc-pVTZ": 0.934,
"CCD aug-cc-pVDZ": 0.965,
"CCD aug-cc-pVTZ": 0.957,
"CCSD 3-21G": 0.943,
"CCSD 3-21G*": 0.943,
"CCSD 6-31G": 0.943,
"CCSD 6-31G*": 0.944,
"CCSD 6-31G**": 0.933,
"CCSD 6-31+G**": 0.934,
"CCSD 6-311G*": 0.954,
"CCSD TZVP": 0.954,
"CCSD cc-pVDZ": 0.947,
"CCSD cc-pVTZ": 0.941,
"CCSD cc-pVQZ": 0.951,
"CCSD aug-cc-pVDZ": 0.963,
"CCSD aug-cc-pVTZ": 0.956,
"CCSD aug-cc-pVQZ": 0.953,
"CCSD=FULL 6-31G*": 0.95,
"CCSD=FULL TZVP": 0.948,
"CCSD=FULL cc-pVTZ": 0.948,
"CCSD=FULL aug-cc-pVTZ": 0.951,
}
element_list = [
["1 ", "H ", "Hydrogen"],
["2 ", "He", "Helium"],
["3 ", "Li", "Lithium"],
["4 ", "Be", "Beryllium"],
["5 ", "B ", "Boron"],
["6 ", "C ", "Carbon"],
["7 ", "N ", "Nitrogen"],
["8 ", "O ", "Oxygen"],
["9 ", "F ", "Fluorine"],
["10", "Ne", "Neon"],
["11", "Na", "Sodium"],
["12", "Mg", "Magnesium"],
["13", "Al", "Aluminum"],
["14", "Si", "Silicon"],
["15", "P ", "Phosphorus"],
["16", "S ", "Sulfur"],
["17", "Cl", "Chlorine"],
["18", "Ar", "Argon"],
["19", "K ", "Potassium"],
["20", "Ca", "Calcium"],
["21", "Sc", "Scandium"],
["22", "Ti", "Titanium"],
["23", "V ", "Vanadium"],
["24", "Cr", "Chromium"],
["25", "Mn", "Manganese"],
["26", "Fe", "Iron"],
["27", "Co", "Cobalt"],
["28", "Ni", "Nickel"],
["29", "Cu", "Copper"],
["30", "Zn", "Zinc"],
["31", "Ga", "Gallium"],
["32", "Ge", "Germanium"],
["33", "As", "Arsenic"],
["34", "Se", "Selenium"],
["35", "Br", "Bromine"],
["36", "Kr", "Krypton"],
["37", "Rb", "Rubidium"],
["38", "Sr", "Strontium"],
["39", "Y ", "Yttrium"],
["40", "Zr", "Zirconium"],
["41", "Nb", "Niobium"],
["42", "Mo", "Molybdenum"],
["43", "Tc", "Technetium"],
["44", "Ru", "Ruthenium"],
["45", "Rh", "Rhodium"],
["46", "Pd", "Palladium"],
["47", "Ag", "Silver"],
["48", "Cd", "Cadmium"],
["49", "In", "Indium"],
["50", "Sn", "Tin"],
["51", "Sb", "Antimony"],
["52", "Te", "Tellurium"],
["53", "I ", "Iodine"],
["54", "Xe", "Xenon"],
["55", "Cs", "Cesium"],
["56", "Ba", "Barium"],
["57", "La", "Lanthanum"],
["58", "Ce", "Cerium"],
["59", "Pr", "Praseodymium"],
["60", "Nd", "Neodymium"],
["61", "Pm", "Promethium"],
["62", "Sm", "Samarium"],
["63", "Eu", "Europium"],
["64", "Gd", "Gadolinium"],
["65", "Tb", "Terbium"],
["66", "Dy", "Dysprosium"],
["67", "Ho", "Holmium"],
["68", "Er", "Erbium"],
["69", "Tm", "Thulium"],
["70", "Yb", "Ytterbium"],
["71", "Lu", "Lutetium"],
["72", "Hf", "Hafnium"],
["73", "Ta", "Tantalum"],
["74", "W ", "Tungsten"],
["75", "Re", "Rhenium"],
["76", "Os", "Osmium"],
["77", "Ir", "Iridium"],
["78", "Pt", "Platinum"],
["79", "Au", "Gold"],
["80", "Hg", "Mercury"],
["81", "Tl", "Thallium"],
["82", "Pb", "Lead"],
["83", "Bi", "Bismuth"],
["84", "Po", "Polonium"],
["85", "At", "Astatine"],
["86", "Rn", "Radon"],
["87", "Fr", "Francium"],
["88", "Ra", "Radium"],
["89", "Ac", "Actinium"],
["90", "Th", "Thorium"],
["91", "Pa", "Protactinium"],
["92", "U ", "Uranium"],
["93", "Np", "Neptunium"],
["94", "Pu", "Plutonium"],
["95", "Am", "Americium"],
["96", "Cm", "Curium"],
["97", "Bk", "Berkelium"],
["98", "Cf", "Californium"],
["99", "Es", "Einsteinium"],
]
def get_vibrational_scaling(functional, basis_set):
"""
Returns vibrational scaling factor given the functional
and the basis set for the QM engine.
Parameters
----------
functional: str
Functional
basis_set: str
Basis set
Returns
-------
vib_scale: float
        Vibrational scaling factor corresponding to the given
        basis_set and functional.
Examples
--------
>>> get_vibrational_scaling("QCISD", "6-311G*")
0.957
"""
vib_scale = method_basis_scale_dict.get(functional + " " + basis_set)
return vib_scale
def unit_vector_N(u_BC, u_AB):
"""
Calculates unit normal vector perpendicular to plane ABC.
Parameters
----------
u_BC : (.. , 1, 3) array
Unit vector from atom B to atom C.
u_AB : (..., 1, 3) array
Unit vector from atom A to atom B.
Returns
-------
u_N : (..., 1, 3) array
Unit normal vector perpendicular to plane ABC.
Examples
--------
>>> u_BC = [0.34040355, 0.62192853, 0.27011169]
>>> u_AB = [0.28276792, 0.34232697, 0.02370306]
>>> unit_vector_N(u_BC, u_AB)
array([-0.65161629, 0.5726879 , -0.49741811])
"""
cross_product = np.cross(u_BC, u_AB)
norm_u_N = np.linalg.norm(cross_product)
u_N = cross_product / norm_u_N
return u_N
def delete_guest_angle_params(guest_qm_params_file="guest_qm_params.txt"):
"""
"""
f_params = open(guest_qm_params_file, "r")
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
lines_selected = lines_params[:to_begin] + lines_params[to_end + 1 :]
with open(guest_qm_params_file, "w") as f_:
f_.write("".join(lines_selected))
return
def remove_bad_angle_params(
guest_qm_params_file="guest_qm_params.txt", angle=1.00, k_angle=500):
with open(guest_qm_params_file, "r") as f_params:
lines_params = f_params.readlines()
for i in range(len(lines_params)):
if "Begin writing the Angle Parameters" in lines_params[i]:
to_begin = int(i)
if "Finish writing the Angle Parameters" in lines_params[i]:
to_end = int(i)
angle_params = lines_params[to_begin + 1 : to_end]
lines_to_omit = []
for i in angle_params:
if float(re.findall(r"[-+]?\d+[.]?\d*", i)[0]) < float(angle) or float(
re.findall(r"[-+]?\d+[.]?\d*", i)[1]
) > float(k_angle):
lines_to_omit.append(i)
for b in lines_to_omit:
lines_params.remove(b)
with open(guest_qm_params_file, "w") as file:
for j in lines_params:
file.write(j)
def get_num_host_atoms(host_pdb):
"""
Reads the host PDB file and returns the
total number of atoms.
"""
ppdb = PandasPdb()
ppdb.read_pdb(host_pdb)
no_host_atoms = ppdb.df["ATOM"].shape[0]
return no_host_atoms
def change_names(inpcrd_file, prmtop_file, pdb_file):
command = "cp -r " + inpcrd_file + " system_qmmmrebind.inpcrd"
os.system(command)
command = "cp -r " + prmtop_file + " system_qmmmrebind.prmtop"
os.system(command)
command = "cp -r " + pdb_file + " system_qmmmrebind.pdb"
os.system(command)
def copy_file(source, destination):
"""
Copies a file from a source to the destination.
"""
shutil.copy(source, destination)
def get_openmm_energies(system_pdb, system_xml):
"""
Returns decomposed OPENMM energies for the
system.
Parameters
----------
system_pdb : str
Input PDB file
system_xml : str
Forcefield file in XML format
"""
pdb = simtk.openmm.app.PDBFile(system_pdb)
ff_xml_file = open(system_xml, "r")
system = simtk.openmm.XmlSerializer.deserialize(ff_xml_file.read())
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(pdb.topology, system, integrator)
simulation.context.setPositions(pdb.positions)
state = simulation.context.getState(
getEnergy=True, getParameters=True, getForces=True
)
force_group = []
for i, force in enumerate(system.getForces()):
force_group.append(force.__class__.__name__)
forcegroups = {}
for i in range(system.getNumForces()):
force = system.getForce(i)
force.setForceGroup(i)
forcegroups[force] = i
energies = {}
for f, i in forcegroups.items():
energies[f] = (
simulation.context.getState(getEnergy=True, groups=2 ** i)
.getPotentialEnergy()
._value
)
decomposed_energy = []
for key, val in energies.items():
decomposed_energy.append(val)
df_energy_openmm = pd.DataFrame(
list(zip(force_group, decomposed_energy)),
columns=["Energy_term", "Energy_openmm_params"],
)
energy_values = [
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicBondForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "HarmonicAngleForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "PeriodicTorsionForce"
].values[0]
)[1],
list(
df_energy_openmm.loc[
df_energy_openmm["Energy_term"] == "NonbondedForce"
].values[0]
)[1],
]
energy_group = [
"HarmonicBondForce",
"HarmonicAngleForce",
"PeriodicTorsionForce",
"NonbondedForce",
]
df_energy_open_mm = pd.DataFrame(
list(zip(energy_group, energy_values)),
columns=["Energy_term", "Energy_openmm_params"],
)
df_energy_open_mm = df_energy_open_mm.set_index("Energy_term")
print(df_energy_open_mm)
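# Note on the decomposition above: it relies on OpenMM force groups. Each force
# is placed in its own group i, and getState(getEnergy=True, groups=2 ** i)
# then reports the potential energy of that group alone, so every per-term
# value in the printed dataframe comes from one state query per force.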
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array which contains the coordinates of all
the N atoms.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def force_angle_constant(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14 of
Seminario calculation paper; returns angle (in kcal/mol/rad^2)
and equilibrium angle (in degrees).
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y and Z
coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
k_0 : float
Equilibrium angle between AB and BC.
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
# Normal vector to angle plane found
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_u_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_u_PA
u_PC = np.cross(u_CB, u_N)
norm_u_PC = np.linalg.norm(u_PC)
u_PC = u_PC / norm_u_PC
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Scaling due to additional angles - Modified Seminario Part
sum_first = sum_first / scaling_1
sum_second = sum_second / scaling_2
# Added as two springs in series
k_theta = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta = 1 / k_theta
k_theta = -k_theta # Change to OPLS form
k_theta = abs(k_theta * 0.5) # Change to OPLS form
# Equilibrium Angle
theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
# If the vectors u_CB and u_AB are linearly dependent u_N cannot be defined.
# This case is dealt with here :
if abs(sum((u_CB) - (u_AB))) < 0.01 or (
abs(sum((u_CB) - (u_AB))) > 1.99 and abs(sum((u_CB) - (u_AB))) < 2.01
):
scaling_1 = 1
scaling_2 = 1
[k_theta, theta_0] = force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
)
return k_theta, theta_0
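# For reference, the quantity assembled above (Eq. 14 of the Seminario paper,
# with the modified-Seminario scaling factors) is
#   1/k_theta = 1 / (R_AB^2 * sum_i lambda_i^AB |u_PA . v_i^AB| / scaling_1)
#             + 1 / (R_CB^2 * sum_i lambda_i^CB |u_PC . v_i^CB| / scaling_2)
# where R_AB, R_CB are the bond lengths, lambda_i / v_i are eigenvalue /
# eigenvector pairs of the corresponding Hessian sub-blocks, and u_PA, u_PC are
# the in-plane unit vectors perpendicular to AB and CB; the final sign flip and
# factor of 0.5 convert the value to the OPLS-style constant.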
def dot_product(u_PA, eig_AB):
"""
Returns the dot product of two vectors.
Parameters
----------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the
plane of A, B, C.
eig_AB : (..., 3, 3) array
Eigenvectors of the hessian matrix for
the bond AB.
"""
x = 0
for i in range(0, 3):
x = x + u_PA[i] * eig_AB[i].conjugate()
return x
def force_angle_constant_special_case(
atom_A,
atom_B,
atom_C,
bond_lengths,
eigenvalues,
eigenvectors,
coords,
scaling_1,
scaling_2,
):
"""
Calculates force constant according to Equation 14
of Seminario calculation paper when the vectors
u_CB and u_AB are linearly dependent and u_N cannot
be defined. It instead takes samples of u_N across a
unit sphere for the calculation; returns angle
(in kcal/mol/rad^2) and equilibrium angle in degrees.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
bond_lengths : (N, N) array
An N * N array containing the bond lengths for
all the possible pairs of atoms.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing
eigenvalues of the hessian matrix, where N
is the total number of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y,
and Z coordinates of all N atoms.
scaling_1 : float
Factor to scale the projections of eigenvalues for AB.
scaling_2 : float
Factor to scale the projections of eigenvalues for BC.
Returns
-------
k_theta : float
Force angle constant calculated using modified
seminario method.
k_0 : float
Equilibrium angle between AB and BC.
"""
# Vectors along bonds calculated
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
# Bond lengths and eigenvalues found
bond_length_AB = bond_lengths[atom_A, atom_B]
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[0:3, 0:3, atom_A, atom_B]
bond_length_BC = bond_lengths[atom_B, atom_C]
eigenvalues_CB = eigenvalues[atom_C, atom_B, :]
eigenvectors_CB = eigenvectors[0:3, 0:3, atom_C, atom_B]
k_theta_array = np.zeros((180, 360))
# Find force constant with varying u_N (with vector uniformly
# sampled across a sphere)
for theta in range(0, 180):
for phi in range(0, 360):
r = 1
            u_N = [
                r
                * math.sin(math.radians(theta))
                * math.cos(math.radians(phi)),
                r
                * math.sin(math.radians(theta))
                * math.sin(math.radians(phi)),
                r * math.cos(math.radians(theta)),
            ]
u_PA = np.cross(u_N, u_AB)
u_PA = u_PA / np.linalg.norm(u_PA)
u_PC = np.cross(u_CB, u_N)
u_PC = u_PC / np.linalg.norm(u_PC)
sum_first = 0
sum_second = 0
# Projections of eigenvalues
for i in range(0, 3):
eig_AB_i = eigenvectors_AB[:, i]
eig_BC_i = eigenvectors_CB[:, i]
sum_first = sum_first + (
eigenvalues_AB[i] * abs(dot_product(u_PA, eig_AB_i))
)
sum_second = sum_second + (
eigenvalues_CB[i] * abs(dot_product(u_PC, eig_BC_i))
)
# Added as two springs in series
k_theta_ij = (1 / ((bond_length_AB ** 2) * sum_first)) + (
1 / ((bond_length_BC ** 2) * sum_second)
)
k_theta_ij = 1 / k_theta_ij
k_theta_ij = -k_theta_ij # Change to OPLS form
k_theta_ij = abs(k_theta_ij * 0.5) # Change to OPLS form
k_theta_array[theta, phi] = k_theta_ij
# Removes cases where u_N was linearly dependent of u_CB or u_AB.
# Force constant used is taken as the mean.
k_theta = np.mean(np.mean(k_theta_array))
# Equilibrium Angle independent of u_N
    theta_0 = math.degrees(math.acos(np.dot(u_AB, u_CB)))
return k_theta, theta_0
def force_constant_bond(atom_A, atom_B, eigenvalues, eigenvectors, coords):
"""
Calculates the bond force constant for the bonds in the
molecule according to equation 10 of seminario paper,
given the bond atoms' indices and the corresponding
eigenvalues, eigenvectors and coordinates matrices.
Parameters
----------
atom_A : int
Index of Atom A.
atom_B : int
Index of Atom B.
eigenvalues : (N, N, 3) array
A numpy array of shape (N, N, 3) containing eigenvalues
of the hessian matrix, where N is the total number
of atoms.
eigenvectors : (3, 3, N, N) array
A numpy array of shape (3, 3, N, N) containing the
eigenvectors of the hessian matrix.
coords : (N, 3) array
A numpy array of shape (N, 3) having the X, Y, and
Z coordinates of all N atoms.
Returns
--------
k_AB : float
Bond Force Constant value for the bond with atoms A and B.
"""
# Eigenvalues and eigenvectors calculated
eigenvalues_AB = eigenvalues[atom_A, atom_B, :]
eigenvectors_AB = eigenvectors[:, :, atom_A, atom_B]
# Vector along bond
diff_AB = np.array(coords[atom_B, :]) - np.array(coords[atom_A, :])
norm_diff_AB = np.linalg.norm(diff_AB)
unit_vectors_AB = diff_AB / norm_diff_AB
k_AB = 0
# Projections of eigenvalues
for i in range(0, 3):
dot_product = abs(np.dot(unit_vectors_AB, eigenvectors_AB[:, i]))
k_AB = k_AB + (eigenvalues_AB[i] * dot_product)
k_AB = -k_AB * 0.5 # Convert to OPLS form
return k_AB
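# For reference, the projection evaluated above (Eq. 10 of the Seminario paper):
#   k_AB = sum_i lambda_i^AB |u_AB . v_i^AB|
# with u_AB the unit vector along the bond and lambda_i / v_i the eigenvalue /
# eigenvector pairs of the A-B Hessian sub-block; the trailing sign flip and
# factor of 0.5 convert the value to the OPLS-style force constant.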
def u_PA_from_angles(atom_A, atom_B, atom_C, coords):
"""
Returns the vector in the plane A,B,C and perpendicular to AB.
Parameters
----------
atom_A : int
Index of atom A (left, starting from 0).
atom_B : int
Index of atom B (center, starting from 0).
atom_C : int
Index of atom C (right, starting from 0).
coords : (..., N, 3) array
An array containing the coordinates of all the N atoms.
Returns
-------
u_PA : (..., 1, 3) array
Unit vector perpendicular to AB and in the plane of A, B, C.
"""
diff_AB = coords[atom_B, :] - coords[atom_A, :]
norm_diff_AB = np.linalg.norm(diff_AB)
u_AB = diff_AB / norm_diff_AB
diff_CB = coords[atom_B, :] - coords[atom_C, :]
norm_diff_CB = np.linalg.norm(diff_CB)
u_CB = diff_CB / norm_diff_CB
u_N = unit_vector_N(u_CB, u_AB)
u_PA = np.cross(u_N, u_AB)
norm_PA = np.linalg.norm(u_PA)
u_PA = u_PA / norm_PA
return u_PA
def reverse_list(lst):
"""
Returns the reversed form of a given list.
Parameters
----------
lst : list
Input list.
Returns
-------
reversed_list : list
Reversed input list.
Examples
--------
>>> lst = [5, 4, 7, 2]
>>> reverse_list(lst)
[2, 7, 4, 5]
"""
reversed_list = lst[::-1]
return reversed_list
def uniq(input_):
"""
Returns a list with only unique elements from a list
containing duplicate / repeating elements.
Parameters
----------
input_ : list
Input list.
Returns
-------
output : list
List with only unique elements.
Examples
--------
>>> lst = [2, 4, 2, 9, 10, 35, 10]
>>> uniq(lst)
[2, 4, 9, 10, 35]
"""
output = []
for x in input_:
if x not in output:
output.append(x)
return output
def search_in_file(file: str, word: str) -> list:
"""
Search for the given string in file and return lines
containing that string along with line numbers.
Parameters
----------
file : str
Input file.
word : str
Search word.
Returns
-------
list_of_results : list
List of lists with each element representing the
line number and the line contents.
"""
line_number = 0
list_of_results = []
with open(file, "r") as f:
for line in f:
line_number += 1
if word in line:
list_of_results.append((line_number, line.rstrip()))
return list_of_results
def list_to_dict(lst):
"""
Converts an input list with mapped characters (every
odd entry is the key of the dictionary and every
    even entry adjacent to the odd entry is its corresponding
value) to a dictionary.
Parameters
----------
lst : list
Input list.
Returns
-------
res_dct : dict
A dictionary with every element mapped with
its successive element starting from index 0.
Examples
--------
>>> lst = [5, 9, 3, 6, 2, 7]
>>> list_to_dict(lst)
{5: 9, 3: 6, 2: 7}
"""
res_dct = {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
return res_dct
def scale_list(list_):
"""
Returns a scaled list with the minimum value
subtracted from each element of the corresponding list.
Parameters
----------
list_ : list
Input list.
Returns
-------
scaled_list : list
Scaled list.
Examples
--------
>>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
>>> scale_list(list_)
[4, 1, 3, 9, 1, 0, 6, 4]
"""
scaled_list = [i - min(list_) for i in list_]
return scaled_list
def list_kJ_kcal(list_):
"""
Convert the elements in the list from
kiloJoules units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of kJ.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_kJ_kcal(list_)
[1.4340344168260037, 0.7170172084130019, 1.1950286806883366]
"""
converted_list = [i / 4.184 for i in list_]
return converted_list
def list_hartree_kcal(list_):
"""
Convert the elements in the list from
hartree units to kiloCalories units.
Parameters
----------
list_ : list
List with elements in units of hartree.
Returns
-------
converted_list : list
List with elements in units of kcal.
Examples
--------
>>> list_ = [6, 3, 5]
>>> list_hartree_kcal(list_)
[3765.0564000000004, 1882.5282000000002, 3137.547]
"""
converted_list = [i * 627.5094 for i in list_]
return converted_list
def torsiondrive_input_to_xyz(psi_input_file, xyz_file):
"""
Returns an xyz file from a torsiondrive formatted
input file.
Parameters
----------
psi_input_file : str
Input file for the psi4 QM engine.
xyz_file : str
XYZ format file to write the coords of the system.
"""
with open(psi_input_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "molecule {" in lines[i]:
to_begin = int(i)
if "set {" in lines[i]:
to_end = int(i)
xyz_lines = lines[to_begin + 2 : to_end - 1]
with open(xyz_file, "w") as f:
f.write(str(len(xyz_lines)) + "\n")
f.write(xyz_file + "\n")
for i in xyz_lines:
f.write(i)
def xyz_to_pdb(xyz_file, coords_file, template_pdb, system_pdb):
"""
Converts a XYZ file to a PDB file.
Parameters
----------
xyz_file : str
XYZ file containing the coordinates of the system.
coords_file : str
A text file containing the coordinates part of XYZ file.
template_pdb : str
A pdb file to be used as a template for the required PDB.
system_pdb : str
Output PDB file with the coordinates updated in the
template pdb using XYZ file.
"""
with open(xyz_file, "r") as f:
lines = f.readlines()
needed_lines = lines[2:]
with open(coords_file, "w") as f:
for i in needed_lines:
f.write(i)
df = pd.read_csv(coords_file, header=None, delimiter=r"\s+")
df.columns = ["atom", "x", "y", "z"]
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = df["x"]
ppdb.df["ATOM"]["y_coord"] = df["y"]
ppdb.df["ATOM"]["z_coord"] = df["z"]
ppdb.to_pdb(system_pdb)
def generate_xml_from_pdb_sdf(system_pdb, system_sdf, system_xml):
"""
Generates an openforcefield xml file from the pdb file.
Parameters
----------
system_pdb : str
Input PDB file.
system_sdf : str
SDF file of the system.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_sdf
os.system(command)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
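# Hedged usage sketch for the PDB -> SDF -> OpenFF XML conversion above; the
# file names are placeholders, and Open Babel must be available on PATH for
# the obabel call inside the function to succeed.
# generate_xml_from_pdb_sdf(
#     system_pdb="guest_init_II.pdb",
#     system_sdf="guest_init_II.sdf",
#     system_xml="guest_init.xml",
# )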
def generate_xml_from_charged_pdb_sdf(
system_pdb,
system_init_sdf,
system_sdf,
num_charge_atoms,
index_charge_atom_1,
charge_atom_1,
system_xml,
):
"""
Generates an openforcefield xml file from the pdb
file via SDF file and openforcefield.
Parameters
----------
system_pdb : str
Input PDB file.
system_init_sdf : str
SDF file for the system excluding charge information.
system_sdf : str
SDF file of the system.
num_charge_atoms : int
Total number of charged atoms in the PDB.
index_charge_atom_1 : int
Index of the first charged atom.
charge_atom_1 : float
Charge on first charged atom.
system_xml : str
XML force field file generated using PDB and SDF files.
"""
# command = "babel -ipdb " + system_pdb + " -osdf " + system_init_sdf
command = "obabel -ipdb " + system_pdb + " -osdf -O " + system_init_sdf
os.system(command)
with open(system_init_sdf, "r") as f1:
filedata = f1.readlines()
filedata = filedata[:-2]
with open(system_sdf, "w+") as out:
for i in filedata:
out.write(i)
line_1 = (
"M CHG "
+ str(num_charge_atoms)
+ " "
+ str(index_charge_atom_1)
+ " "
+ str(charge_atom_1)
+ "\n"
)
line_2 = "M END" + "\n"
line_3 = "$$$$"
out.write(line_1)
out.write(line_2)
out.write(line_3)
# off_molecule = openforcefield.topology.Molecule(system_sdf)
off_molecule = Molecule(system_sdf)
# force_field = openforcefield.typing.engines.smirnoff.ForceField("openff_unconstrained-1.0.0.offxml")
force_field = ForceField("openff_unconstrained-1.0.0.offxml")
system = force_field.create_openmm_system(off_molecule.to_topology())
pdbfile = simtk.openmm.app.PDBFile(system_pdb)
structure = parmed.openmm.load_topology(
pdbfile.topology, system, xyz=pdbfile.positions
)
with open(system_xml, "w") as f:
f.write(simtk.openmm.XmlSerializer.serialize(system))
def get_dihedrals(qm_scan_file):
"""
Returns dihedrals from the torsiondrive scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
dihedrals : list
List of all the dihedral values from the qm scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
return dihedrals
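# Hedged note on the line format assumed by the scan-file parsers above and
# below (example values are made up): a torsiondrive line such as
#     "Dihedral (-165.0) Energy (-263.754219)"
# yields re.findall(r"[-+]?\d+[.]?\d*", line) == ["-165.0", "-263.754219"],
# so index 0 is taken as the dihedral angle and index 1 as the QM energy.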
def get_qm_energies(qm_scan_file):
"""
Returns QM optimized energies from the torsiondrive
scan file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
Returns
-------
qm_energies : list
        List of all the QM optimized energies extracted from the torsiondrive
scan file.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
qm_energies = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
energy = float(energy_dihedral[1])
qm_energies.append(energy)
return qm_energies
def generate_mm_pdbs(qm_scan_file, template_pdb):
"""
Generate PDBs from the torsiondrive scan file
based on a template PDB.
"""
with open(qm_scan_file, "r") as f:
lines = f.readlines()
energy_dihedral_lines = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
energy_dihedral_lines.append(lines[i])
dihedrals = []
for i in energy_dihedral_lines:
energy_dihedral = i
energy_dihedral = re.findall(r"[-+]?\d+[.]?\d*", energy_dihedral)
dihedral = float(energy_dihedral[0])
dihedrals.append(dihedral)
lines_markers = []
for i in range(len(lines)):
if "Dihedral" in lines[i]:
lines_markers.append(i)
lines_markers.append(len(lines) + 1)
for i in range(len(lines_markers) - 1):
# pdb_file_to_write = str(dihedrals[i]) + ".pdb"
if dihedrals[i] > 0:
pdb_file_to_write = "plus_" + str(abs(dihedrals[i])) + ".pdb"
if dihedrals[i] < 0:
pdb_file_to_write = "minus_" + str(abs(dihedrals[i])) + ".pdb"
to_begin = lines_markers[i]
to_end = lines_markers[i + 1]
lines_to_write = lines[to_begin + 1 : to_end - 1]
x_coords = []
y_coords = []
z_coords = []
for i in lines_to_write:
coordinates = i
coordinates = re.findall(r"[-+]?\d+[.]?\d*", coordinates)
x = float(coordinates[0])
y = float(coordinates[1])
z = float(coordinates[2])
x_coords.append(x)
y_coords.append(y)
z_coords.append(z)
ppdb = PandasPdb()
ppdb.read_pdb(template_pdb)
ppdb.df["ATOM"]["x_coord"] = x_coords
ppdb.df["ATOM"]["y_coord"] = y_coords
ppdb.df["ATOM"]["z_coord"] = z_coords
ppdb.to_pdb(pdb_file_to_write)
def remove_mm_files(qm_scan_file):
"""
Delete all generated PDB files.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
for i in mm_pdb_list:
command = "rm -rf " + i
os.system(command)
command = "rm -rf " + i[:-4] + ".inpcrd"
os.system(command)
command = "rm -rf " + i[:-4] + ".prmtop"
os.system(command)
def get_non_torsion_mm_energy(system_pdb, load_topology, system_xml):
"""
Returns sum of all the non-torsional energies (that
includes HarmonicBondForce, HarmonicAngleForce
and NonBondedForce) of the system from the PDB
file given the topology and the forcefield file.
Parameters
----------
system_pdb : str
System PDB file to load the openmm system topology
and coordinates.
load_topology : {"openmm", "parmed"}
Argument to specify how to load the topology.
system_xml : str
XML force field file for the openmm system.
Returns
-------
Sum of all the non-torsional energies of the system.
"""
system_prmtop = system_pdb[:-4] + ".prmtop"
system_inpcrd = system_pdb[:-4] + ".inpcrd"
if load_topology == "parmed":
openmm_system = parmed.openmm.load_topology(
parmed.load_file(system_pdb, structure=True).topology,
parmed.load_file(system_xml),
)
if load_topology == "openmm":
openmm_system = parmed.openmm.load_topology(
simtk.openmm.app.PDBFile(system_pdb).topology,
parmed.load_file(system_xml),
)
openmm_system.save(system_prmtop, overwrite=True)
openmm_system.coordinates = parmed.load_file(
system_pdb, structure=True
).coordinates
openmm_system.save(system_inpcrd, overwrite=True)
parm = parmed.load_file(system_prmtop, system_inpcrd)
prmtop_energy_decomposition = parmed.openmm.energy_decomposition_system(
parm, parm.createSystem()
)
# print(prmtop_energy_decomposition)
prmtop_energy_decomposition_value_no_torsion = [
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicBondForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("HarmonicAngleForce"),
list_to_dict(
[
item
for sublist in [
list(elem) for elem in prmtop_energy_decomposition
]
for item in sublist
]
).get("NonbondedForce"),
]
return sum(prmtop_energy_decomposition_value_no_torsion)
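# Possible behavior-preserving simplification (sketch, not applied above):
# build the energy dictionary once instead of three times.
#   energy_dict = list_to_dict(
#       [item for sublist in
#        [list(elem) for elem in prmtop_energy_decomposition]
#        for item in sublist]
#   )
#   return (energy_dict.get("HarmonicBondForce")
#           + energy_dict.get("HarmonicAngleForce")
#           + energy_dict.get("NonbondedForce"))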
def get_mm_potential_energies(qm_scan_file, load_topology, system_xml):
"""
Returns potential energy of the system from the PDB file
given the topology and the forcefield file.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML file to load the openmm system.
Returns
-------
mm_potential_energies : list
List of all the non torsion mm energies for the
generated PDB files.
"""
mm_pdb_list = []
for i in get_dihedrals(qm_scan_file):
if i > 0:
pdb_file = "plus_" + str(abs(i)) + ".pdb"
if i < 0:
pdb_file = "minus_" + str(abs(i)) + ".pdb"
mm_pdb_list.append(pdb_file)
    mm_potential_energies = []
    for mm_pdb_file in mm_pdb_list:
        mm_energy = get_non_torsion_mm_energy(
            system_pdb=mm_pdb_file,
            load_topology=load_topology,
            system_xml=system_xml,
        )
        mm_potential_energies.append(mm_energy)
return mm_potential_energies
def list_diff(list_1, list_2):
"""
Returns the difference between two lists as a list.
Parameters
----------
list_1 : list
First list
list_2 : list
Second list.
Returns
-------
diff_list : list
        List containing the differences between the elements of
the two lists.
Examples
--------
>>> list_1 = [4, 2, 8, 3, 0, 6, 7]
>>> list_2 = [5, 3, 1, 5, 6, 0, 4]
>>> list_diff(list_1, list_2)
[-1, -1, 7, -2, -6, 6, 3]
"""
diff_list = []
zipped_list = zip(list_1, list_2)
for list1_i, list2_i in zipped_list:
diff_list.append(list1_i - list2_i)
return diff_list
def dihedral_energy(x, k1, k2, k3, k4=0):
"""
Expression for the dihedral energy.
"""
energy_1 = k1 * (1 + np.cos(1 * x * 0.01745))
energy_2 = k2 * (1 - np.cos(2 * x * 0.01745))
energy_3 = k3 * (1 + np.cos(3 * x * 0.01745))
energy_4 = k4 * (1 - np.cos(4 * x * 0.01745))
dihedral_energy = energy_1 + energy_2 + energy_3 + energy_4
return dihedral_energy
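# Added sketch (not part of the original module): evaluates the four-term
# cosine series above over a scan grid. The factor 0.01745 used in
# dihedral_energy approximates pi/180, so x is expected in degrees. The k
# values below are hypothetical placeholders, not fitted parameters.
def _demo_dihedral_energy_profile():
    angles = np.arange(-180.0, 181.0, 15.0)
    energies = dihedral_energy(angles, k1=1.2, k2=0.4, k3=0.8, k4=0.0)
    return list(zip(angles.tolist(), energies.tolist()))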
def error_function(delta_qm, delta_mm):
"""
Root Mean Squared Error.
"""
squared_error = np.square(np.subtract(delta_qm, delta_mm))
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
def error_function_boltzmann(delta_qm, delta_mm, T):
"""
Boltzmann Root Mean Squared Error.
"""
kb = 3.297623483 * 10 ** (-24) # in cal/K
delta_qm_boltzmann_weighted = [np.exp(-i / (kb * T)) for i in delta_qm]
squared_error = (
np.square(np.subtract(delta_qm, delta_mm))
* delta_qm_boltzmann_weighted
)
mean_squared_error = squared_error.mean()
root_mean_squared_error = math.sqrt(mean_squared_error)
return root_mean_squared_error
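# Added sketch (not in the original module): a plain usage example of the RMSE
# error function above with hypothetical energy differences (kcal/mol).
# error_function_boltzmann takes the same inputs plus a temperature T in Kelvin.
def _demo_error_function():
    delta_qm = [0.0, 1.5, 3.2, 0.8]
    delta_mm = [0.1, 1.2, 2.9, 1.1]
    return error_function(delta_qm, delta_mm)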
def gen_init_guess(qm_scan_file, load_topology, system_xml):
"""
Initial guess for the torsional parameter.
Parameters
----------
qm_scan_file : str
Output scan file containing torsiondrive scans.
load_topology : {"openmm", "parmed"}
        Argument to specify how to load the topology.
system_xml : str
XML force field file for the system.
Returns
-------
k_init_guess : list
Initial guess for the torsional parameters.
"""
x = get_dihedrals(qm_scan_file)
y = scale_list(
list_=get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
)
init_vals = [0.0, 0.0, 0.0, 0.0]
k_init_guess, covar = scipy.optimize.curve_fit(
dihedral_energy, x, y, p0=init_vals
)
for i in range(len(k_init_guess)):
if k_init_guess[i] < 0:
k_init_guess[i] = 0
return k_init_guess
def objective_function(k_array, x, delta_qm):
"""
Objective function for the torsional parameter fitting.
"""
delta_mm = dihedral_energy(
x, k1=k_array[0], k2=k_array[1], k3=k_array[2], k4=k_array[3]
)
loss_function = error_function(delta_qm, delta_mm)
return loss_function
def fit_params(qm_scan_file, load_topology, system_xml, method):
"""
Optimization of the objective function.
"""
k_guess = gen_init_guess(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
x_data = np.array(get_dihedrals(qm_scan_file))
delta_qm = np.array(
scale_list(list_hartree_kcal(list_=get_qm_energies(qm_scan_file)))
)
optimise = scipy.optimize.minimize(
objective_function,
k_guess,
args=(x_data, delta_qm),
method=method,
bounds=[(0.00, None), (0.00, None), (0.00, None), (0.00, None),],
)
return optimise.x
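# Added note (file names are hypothetical): fit_params is typically driven
# with a torsiondrive scan file and the non-torsion system XML, e.g.
#   k_fit = fit_params("scan.xyz", load_topology="openmm",
#                      system_xml="guest_init.xml", method="L-BFGS-B")
# Since bounds are passed to scipy.optimize.minimize, the chosen method must
# support bounds (e.g. "L-BFGS-B", "TNC", or "SLSQP").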
def get_tor_params(
qm_scan_file, template_pdb, load_topology, system_xml, method
):
"""
Returns the fitted torsional parameters.
"""
qm_e = get_qm_energies(qm_scan_file=qm_scan_file)
qm_e_kcal = list_hartree_kcal(qm_e)
delta_qm = scale_list(qm_e_kcal)
generate_mm_pdbs(qm_scan_file=qm_scan_file, template_pdb=template_pdb)
mm_pe_no_torsion_kcal = get_mm_potential_energies(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
)
delta_mm = scale_list(mm_pe_no_torsion_kcal)
opt_param = fit_params(
qm_scan_file=qm_scan_file,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
return opt_param
def get_torsional_lines(
template_pdb,
system_xml,
qm_scan_file,
load_topology,
method,
dihedral_text_file,
):
"""
Returns the torsional lines for the XML forcefield file.
"""
opt_param = get_tor_params(
qm_scan_file=qm_scan_file,
template_pdb=template_pdb,
load_topology=load_topology,
system_xml=system_xml,
method=method,
)
dihedral_text = open(dihedral_text_file, "r")
dihedral_text_lines = dihedral_text.readlines()
atom_numbers = dihedral_text_lines[-1]
atom_index_from_1 = [
int(re.findall(r"\d+", atom_numbers)[0]),
int(re.findall(r"\d+", atom_numbers)[1]),
int(re.findall(r"\d+", atom_numbers)[2]),
int(re.findall(r"\d+", atom_numbers)[3]),
]
atom_index = [i - 1 for i in atom_index_from_1]
atom_index_lines = (
" "
+ "p1="
+ '"'
+ str(atom_index[0])
+ '"'
+ " "
+ "p2="
+ '"'
+ str(atom_index[1])
+ '"'
+ " "
+ "p3="
+ '"'
+ str(atom_index[2])
+ '"'
+ " "
+ "p4="
+ '"'
+ str(atom_index[3])
+ '"'
+ " "
)
tor_lines = []
for i in range(len(opt_param)):
line_to_append = (
" "
+ "<Torsion "
+ "k="
+ '"'
+ str(round(opt_param[i], 8))
+ '"'
+ atom_index_lines
+ "periodicity="
+ '"'
+ str(i + 1)
+ '"'
+ " "
+ "phase="
+ '"'
+ "0"
+ '"'
+ "/>"
)
# print(line_to_append)
tor_lines.append(line_to_append)
return tor_lines
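# Example of the kind of XML line assembled above (values are illustrative
# placeholders; one such line is produced per periodicity term):
#   <Torsion k="1.23456789" p1="4" p2="6" p3="8" p4="10" periodicity="1" phase="0"/>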
def singular_resid(pdbfile, qmmmrebind_init_file):
"""
Returns a PDB file with chain ID = A
Parameters
----------
pdbfile: str
Input PDB file
qmmmrebind_init_file: str
Output PDB file
"""
ppdb = PandasPdb().read_pdb(pdbfile)
ppdb.df["HETATM"]["chain_id"] = "A"
ppdb.df["ATOM"]["chain_id"] = "A"
ppdb.to_pdb(
path=qmmmrebind_init_file, records=None, gz=False, append_newline=True
)
def relax_init_structure(
pdbfile,
prmtopfile,
qmmmrebindpdb,
sim_output="output.pdb",
sim_steps=100000,
):
"""
Minimizing the initial PDB file with the given topology
file
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile : str
Input prmtop file.
qmmmrebind_init_file: str
Output PDB file.
sim_output: str
Simulation output trajectory file.
sim_steps: int
MD simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedMethod=simtk.openmm.app.PME,
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=10000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.reporters.append(
simtk.openmm.app.PDBReporter(qmmmrebindpdb, sim_steps)
)
simulation.step(sim_steps)
command = "rm -rf " + sim_output
os.system(command)
def truncate(x):
"""
    Formats the value to a fixed overall character width, as needed
    for the fixed-width box-dimension fields written below.
    Parameters
    ----------
    x: float
        Input value.
"""
if len(str(int(float(x)))) == 1:
x = format(x, ".8f")
if len(str(int(float(x)))) == 2:
x = format(x, ".7f")
if len(str(int(float(x)))) == 3:
x = format(x, ".6f")
if len(str(int(float(x)))) == 4:
x = format(x, ".5f")
if len(str(x)) > 10:
x = round(x, 10)
return x
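# Added illustration (not in the original module): truncate keeps the overall
# character width of the formatted value constant, which is what the
# fixed-width Amber box records written below expect.
def _demo_truncate():
    # 1 integer digit -> 8 decimals, 2 digits -> 7 decimals, and so on.
    return truncate(4.5), truncate(45.0), truncate(450.0), truncate(4500.0)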
def add_vectors_inpcrd(pdbfile, inpcrdfile):
"""
Adds periodic box dimensions to the inpcrd file
Parameters
----------
pdbfile: str
PDB file containing the periodic box information.
inpcrdfile: str
Input coordinate file.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
line_to_add = (
" "
+ truncate(vector_list[0])
+ " "
+ truncate(vector_list[1])
+ " "
+ truncate(vector_list[2])
+ " "
+ truncate(vector_list[3])
+ " "
+ truncate(vector_list[4])
+ " "
+ truncate(vector_list[5])
)
print(line_to_add)
with open(inpcrdfile, "a+") as f:
f.write(line_to_add)
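# Example (added for illustration): a PDB record such as
#   CRYST1   40.000   40.000   40.000  90.00  90.00  90.00 P 1           1
# produces an appended inpcrd line of six fixed-width fields, roughly:
#   40.0000000  40.0000000  40.0000000  90.0000000  90.0000000  90.0000000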
def add_dim_prmtop(pdbfile, prmtopfile):
"""
Adds periodic box dimensions flag in the prmtop file.
Parameters
----------
prmtopfile: str
Input prmtop file.
pdbfile: str
PDB file containing the periodic box information.
"""
pdbfilelines = open(pdbfile, "r").readlines()
for i in pdbfilelines:
if "CRYST" in i:
vector_list = re.findall(r"[-+]?\d*\.\d+|\d+", i)
vector_list = [float(i) for i in vector_list]
vector_list = vector_list[1 : 1 + 6]
vector_list = [i / 10 for i in vector_list]
vector_list = [truncate(i) for i in vector_list]
vector_list = [i + "E+01" for i in vector_list]
line3 = (
" "
+ vector_list[3]
+ " "
+ vector_list[0]
+ " "
+ vector_list[1]
+ " "
+ vector_list[2]
)
print(line3)
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
with open(prmtopfile) as f1, open("intermediate.prmtop", "w") as f2:
for line in f1:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f2.write(line)
command = "rm -rf " + prmtopfile
os.system(command)
command = "mv intermediate.prmtop " + prmtopfile
os.system(command)
def add_period_prmtop(parm_file, ifbox):
"""
Changes the value of IFBOX if needed for the prmtop / parm file.
Set to 1 if standard periodic box and 2 when truncated octahedral.
"""
with open(parm_file) as f:
parm_lines = f.readlines()
lines_contain = []
for i in range(len(parm_lines)):
if parm_lines[i].startswith("%FLAG POINTERS"):
lines_contain.append(i + 4)
line = parm_lines[lines_contain[0]]
line_new = "%8s %6s %6s %6s %6s %6s %6s %6s %6s %6s" % (
re.findall(r"\d+", line)[0],
re.findall(r"\d+", line)[1],
re.findall(r"\d+", line)[2],
re.findall(r"\d+", line)[3],
re.findall(r"\d+", line)[4],
re.findall(r"\d+", line)[5],
re.findall(r"\d+", line)[6],
str(ifbox),
re.findall(r"\d+", line)[8],
re.findall(r"\d+", line)[9],
)
parm_lines[lines_contain[0]] = line_new + "\n"
with open(parm_file, "w") as f:
for i in parm_lines:
f.write(i)
def add_solvent_pointers_prmtop(non_reparams_file, reparams_file):
"""
Adds the flag solvent pointers to the topology file.
"""
f_non_params = open(non_reparams_file, "r")
lines_non_params = f_non_params.readlines()
for i in range(len(lines_non_params)):
if "FLAG SOLVENT_POINTERS" in lines_non_params[i]:
to_begin = int(i)
solvent_pointers = lines_non_params[to_begin : to_begin + 3]
file = open(reparams_file, "a")
for i in solvent_pointers:
file.write(i)
def prmtop_calibration(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
):
"""
Standardizes the topology files
Parameters
----------
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
"""
parm = parmed.load_file(prmtopfile, inpcrdfile)
parm_1 = parmed.tools.actions.changeRadii(parm, "mbondi3")
parm_1.execute()
parm_2 = parmed.tools.actions.setMolecules(parm)
parm_2.execute()
parm.save(prmtopfile, overwrite=True)
def run_openmm_prmtop_inpcrd(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with inpcrd and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
inpcrdfile: str
Input coordinate file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
inpcrd = simtk.openmm.app.AmberInpcrdFile(inpcrdfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
if inpcrd.boxVectors is None:
add_vectors_inpcrd(
pdbfile=pdbfile, inpcrdfile=inpcrdfile,
)
if inpcrd.boxVectors is not None:
simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
print(inpcrd.boxVectors)
simulation.context.setPositions(inpcrd.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def run_openmm_prmtop_pdb(
pdbfile="system_qmmmrebind.pdb",
prmtopfile="system_qmmmrebind.prmtop",
sim_output="output.pdb",
sim_steps=10000,
):
"""
Runs OpenMM simulation with pdb and prmtop files.
Parameters
----------
pdbfile: str
Input PDB file.
prmtopfile: str
Input prmtop file.
sim_output: str
Output trajectory file.
sim_steps: int
Simulation steps.
"""
prmtop = simtk.openmm.app.AmberPrmtopFile(prmtopfile)
pdb = simtk.openmm.app.PDBFile(pdbfile)
system = prmtop.createSystem(
nonbondedCutoff=1 * simtk.unit.nanometer,
constraints=simtk.openmm.app.HBonds,
)
integrator = simtk.openmm.LangevinIntegrator(
300 * simtk.unit.kelvin,
1 / simtk.unit.picosecond,
0.002 * simtk.unit.picoseconds,
)
simulation = simtk.openmm.app.Simulation(
prmtop.topology, system, integrator
)
simulation.context.setPositions(pdb.positions)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.minimizeEnergy(maxIterations=1000000)
print(simulation.context.getState(getEnergy=True).getPotentialEnergy())
simulation.reporters.append(
simtk.openmm.app.PDBReporter(sim_output, int(sim_steps / 10))
)
simulation.reporters.append(
simtk.openmm.app.StateDataReporter(
stdout,
int(sim_steps / 10),
step=True,
potentialEnergy=True,
temperature=True,
)
)
simulation.step(sim_steps)
def move_qmmmmrebind_files(
prmtopfile="system_qmmmrebind.prmtop",
inpcrdfile="system_qmmmrebind.inpcrd",
pdbfile="system_qmmmrebind.pdb",
):
"""
Moves QMMMReBind generated topology and parameter files
    to a new directory.
Parameters
----------
prmtopfile: str
QMMMReBind generated prmtop file.
inpcrdfile: str
QMMMReBind generated inpcrd file.
pdbfile: str
QMMMReBind generated PDB file.
"""
current_pwd = os.getcwd()
command = "rm -rf reparameterized_files"
os.system(command)
command = "mkdir reparameterized_files"
os.system(command)
shutil.copy(
current_pwd + "/" + prmtopfile,
current_pwd + "/" + "reparameterized_files" + "/" + prmtopfile,
)
shutil.copy(
current_pwd + "/" + inpcrdfile,
current_pwd + "/" + "reparameterized_files" + "/" + inpcrdfile,
)
shutil.copy(
current_pwd + "/" + pdbfile,
current_pwd + "/" + "reparameterized_files" + "/" + pdbfile,
)
def move_qm_files():
"""
    Moves QM engine generated files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qm_data"
os.system(command)
command = "mkdir qm_data"
os.system(command)
command = "cp -r " + "*.com* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.log* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.chk* " + current_pwd + "/" + "qm_data"
os.system(command)
command = "cp -r " + "*.fchk* " + current_pwd + "/" + "qm_data"
os.system(command)
def move_qmmmrebind_files():
"""
    Moves all QMMMReBind files to a new directory.
"""
current_pwd = os.getcwd()
command = "rm -rf qmmmrebind_data"
os.system(command)
command = "mkdir qmmmrebind_data"
os.system(command)
command = "mv " + "*.sdf* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.txt* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.pdb* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xml* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.chk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.fchk* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.com* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.log* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.inpcrd* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.prmtop* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.parm7* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.out* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*run_command* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.dat* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
command = "mv " + "*.xyz* " + current_pwd + "/" + "qmmmrebind_data"
os.system(command)
class PrepareQMMM:
"""
A class used to segregate the QM and MM regions.
    This class contains methods to remove the solvent, ions, and all
    entities other than the receptor and the ligand. It also
defines the Quantum Mechanical (QM) region and the Molecular
Mechanical (MM) region based upon the distance of the ligand
from the receptor and the chosen number of receptor residues. It
is also assumed that the initial PDB file will have the receptor
followed by the ligand.
...
Attributes
----------
init_pdb : str
Initial PDB file containing the receptor-ligand complex with
solvent, ions, etc.
cleaned_pdb : str
Formatted PDB file containing only the receptor and the ligand.
guest_init_pdb : str
A separate ligand PDB file with atom numbers not beginning from 1.
host_pdb : str
A separate receptor PDB file with atom numbers beginning from 1.
guest_resname : str
Three letter residue ID for the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
guest_xyz : str, optional
A text file of the XYZ coordinates of the ligand.
distance : float, optional
The distance required to define the QM region of the receptor.
This is the distance between the atoms of the ligand and the
atoms of the receptor.
residue_list : str, optional
A text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
host_qm_atoms : str, optional
A text file of the atom numbers of the receptors in the QM
region.
host_mm_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region (all atoms except atoms in the QM region)
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
host_mm_pdb : str, optional
PDB file for the receptor's MM region.
qm_pdb : str, optional
PDB file for the QM region (receptor's QM region and the
ligand).
mm_pdb : str, optional
PDB file for the MM region.
host_mm_region_I_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
        region preceding the QM region.
host_mm_region_II_atoms : str, optional
A text file of the atom numbers of the receptors in the MM
region following the QM region.
host_mm_region_I_pdb : str, optional
        PDB file of the receptor in the MM region preceding the
QM region.
host_mm_region_II_pdb : str, optional
PDB file of the receptor in the MM region following the
QM region.
num_residues : int, optional
Number of residues required in the QM region of the receptor.
"""
def __init__(
self,
init_pdb,
distance,
num_residues,
guest_resname,
cleaned_pdb="system.pdb",
guest_init_pdb="guest_init.pdb",
host_pdb="host.pdb",
guest_pdb="guest_init_II.pdb",
guest_xyz="guest_coord.txt",
residue_list="residue_list.txt",
host_qm_atoms="host_qm.txt",
host_mm_atoms="host_mm.txt",
host_qm_pdb="host_qm.pdb",
host_mm_pdb="host_mm.pdb",
qm_pdb="qm.pdb",
mm_pdb="mm.pdb",
host_mm_region_I_atoms="host_mm_region_I.txt",
host_mm_region_II_atoms="host_mm_region_II.txt",
host_mm_region_I_pdb="host_mm_region_I.pdb",
host_mm_region_II_pdb="host_mm_region_II.pdb",
):
self.init_pdb = init_pdb
self.distance = distance
self.num_residues = num_residues
self.guest_resname = guest_resname
self.cleaned_pdb = cleaned_pdb
self.guest_init_pdb = guest_init_pdb
self.host_pdb = host_pdb
self.guest_pdb = guest_pdb
self.guest_xyz = guest_xyz
self.residue_list = residue_list
self.host_qm_atoms = host_qm_atoms
self.host_mm_atoms = host_mm_atoms
self.host_qm_pdb = host_qm_pdb
self.host_mm_pdb = host_mm_pdb
self.qm_pdb = qm_pdb
self.mm_pdb = mm_pdb
self.host_mm_region_I_atoms = host_mm_region_I_atoms
self.host_mm_region_II_atoms = host_mm_region_II_atoms
self.host_mm_region_I_pdb = host_mm_region_I_pdb
self.host_mm_region_II_pdb = host_mm_region_II_pdb
def clean_up(self):
"""
Reads the given PDB file, removes all entities except the
receptor and ligand and saves a new pdb file.
"""
ions = [
"Na+",
"Cs+",
"K+",
"Li+",
"Rb+",
"Cl-",
"Br-",
"F-",
"I-",
"Ca2",
]
intermediate_file_1 = self.cleaned_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.cleaned_pdb[:-4] + "_intermediate_2.pdb"
command = (
"pdb4amber -i "
+ self.init_pdb
+ " -o "
+ intermediate_file_1
+ " --noter --dry"
)
os.system(command)
to_delete = (
intermediate_file_1[:-4] + "_nonprot.pdb",
intermediate_file_1[:-4] + "_renum.txt",
intermediate_file_1[:-4] + "_sslink",
intermediate_file_1[:-4] + "_water.pdb",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_1) as f1, open(
intermediate_file_2, "w") as f2:
for line in f1:
if not any(ion in line for ion in ions):
f2.write(line)
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.cleaned_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def create_host_guest(self):
"""
Saves separate receptor and ligand PDB files.
"""
with open(self.cleaned_pdb) as f1, open(self.host_pdb, "w") as f2:
for line in f1:
if not self.guest_resname in line and not "CRYST1" in line:
f2.write(line)
with open(self.cleaned_pdb) as f1, open(
self.guest_init_pdb, "w"
) as f2:
for line in f1:
if self.guest_resname in line or "END" in line:
f2.write(line)
def realign_guest(self):
"""
Saves a ligand PDB file with atom numbers beginning from 1.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_init_pdb)
to_subtract = min(ppdb.df["ATOM"]["atom_number"]) - 1
ppdb.df["ATOM"]["atom_number"] = (
ppdb.df["ATOM"]["atom_number"] - to_subtract
)
intermediate_file_1 = self.guest_pdb[:-4] + "_intermediate_1.pdb"
intermediate_file_2 = self.guest_pdb[:-4] + "_intermediate_2.pdb"
ppdb.to_pdb(path=intermediate_file_1)
command = (
"pdb4amber -i "
+ intermediate_file_1
+ " -o "
+ intermediate_file_2
)
os.system(command)
to_delete = (
intermediate_file_2[:-4] + "_nonprot.pdb",
intermediate_file_2[:-4] + "_renum.txt",
intermediate_file_2[:-4] + "_sslink",
)
os.system("rm -rf " + " ".join(to_delete))
with open(intermediate_file_2, "r") as f1:
filedata = f1.read()
filedata = filedata.replace("HETATM", "ATOM ")
with open(self.guest_pdb, "w") as f2:
f2.write(filedata)
command = "rm -rf " + intermediate_file_1 + " " + intermediate_file_2
os.system(command)
def get_guest_coord(self):
"""
Saves a text file of the XYZ coordinates of the ligand.
"""
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
xyz = ppdb.df["ATOM"][["x_coord", "y_coord", "z_coord"]]
xyz_to_list = xyz.values.tolist()
np.savetxt(self.guest_xyz, xyz_to_list)
def get_qm_resids(self):
"""
Saves a text file of the residue numbers of the receptor within the
proximity (as defined by the distance) from the ligand.
"""
guest_coord_list = np.loadtxt(self.guest_xyz)
host_atom_list = []
for i in range(len(guest_coord_list)):
reference_point = guest_coord_list[i]
# TODO: move reads outside of loop
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
distances = ppdb.distance(xyz=reference_point, records=("ATOM"))
all_within_distance = ppdb.df["ATOM"][
distances < float(self.distance)
]
host_df = all_within_distance["atom_number"]
host_list = host_df.values.tolist()
host_atom_list.append(host_list)
host_atom_list = list(itertools.chain(*host_atom_list))
host_atom_list = set(host_atom_list)
host_atom_list = list(host_atom_list)
host_atom_list.sort()
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
index_list = []
for i in host_atom_list:
indices = np.where(df["atom_number"] == i)
indices = list(indices)[0]
indices = list(indices)
index_list.append(indices)
index_list = list(itertools.chain.from_iterable(index_list))
df1 = df.iloc[
index_list,
]
# TODO: make it write list of integers
resid_num = list(df1.residue_number.unique())
np.savetxt(self.residue_list, resid_num, fmt="%i")
def get_host_qm_mm_atoms(self):
"""
Saves a text file of the atom numbers of the receptors in the QM
region and MM region separately.
"""
resid_num = np.loadtxt(self.residue_list)
# approximated_res_list = [int(i) for i in resid_num]
approximated_res_list = []
# TODO: what is this doing?
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
host_index_nested_list = []
for i in approximated_res_list:
indices = np.where(df["residue_number"] == i)
#TODO: the program seems to error when this line is removed, which
# makes no sense.
indices = list(indices)[0]
indices = list(indices)
host_index_nested_list.append(indices)
host_index_list = list(
itertools.chain.from_iterable(host_index_nested_list)
)
df_atom = df.iloc[host_index_list]
df_atom_number = df_atom["atom_number"]
host_atom_list = df_atom_number.values.tolist()
selected_atoms = []
selected_atoms.extend(host_atom_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
non_selected_atoms = list(set(len_atoms).difference(selected_atoms))
assert len(non_selected_atoms) + len(selected_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_qm_atoms, selected_atoms, fmt="%i")
np.savetxt(self.host_mm_atoms, non_selected_atoms, fmt="%i")
def save_host_pdbs(self):
"""
Saves a PDB file for the receptor's QM region and MM
region separately.
"""
selected_atoms = np.loadtxt(self.host_qm_atoms)
# TODO: not necessary if savetxt writes in integers
selected_atoms = [int(i) for i in selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_pdb, records=None, gz=False, append_newline=True,
)
non_selected_atoms = np.loadtxt(self.host_mm_atoms)
non_selected_atoms = [int(i) for i in non_selected_atoms]
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
for i in non_selected_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_qm_pdb, records=None, gz=False, append_newline=True,
)
def get_host_mm_region_atoms(self):
"""
Saves a text file for the atoms of the receptor's MM region
preceding the QM region and saves another text file for the
        atoms of the receptor's MM region following the QM region.
"""
resid_num = np.loadtxt(self.residue_list)
approximated_res_list = []
for i in range(
int(statistics.median(resid_num))
- int(int(self.num_residues) / 2),
int(statistics.median(resid_num))
+ int(int(self.num_residues) / 2),
):
approximated_res_list.append(i)
# print(approximated_res_list)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_pdb)
df = ppdb.df["ATOM"][["residue_number"]]
res_list = list(set(df["residue_number"].to_list()))
res_mm_list = list(set(res_list).difference(approximated_res_list))
# print(res_mm_list)
res_mm_region_I_list = []
# TODO: This can probably be made into a single loop by comparing i
# to the maximum value within approximated_res_list
for i in res_mm_list:
for j in approximated_res_list:
if i < j:
res_mm_region_I_list.append(i)
res_mm_region_I_list = list(set(res_mm_region_I_list))
res_mm_region_II_list = list(
set(res_mm_list).difference(res_mm_region_I_list)
)
# print(res_mm_region_II_list)
ppdb.read_pdb(self.host_mm_pdb)
df = ppdb.df["ATOM"][["atom_number", "residue_number", "residue_name"]]
mm_region_I_index_nested_list = []
for i in res_mm_region_I_list:
indices = np.where(df["residue_number"] == i)
# TODO: again, this is strange code
indices = list(indices)[0]
indices = list(indices)
mm_region_I_index_nested_list.append(indices)
mm_region_I_index_list = list(
itertools.chain.from_iterable(mm_region_I_index_nested_list)
)
df_atom = df.iloc[mm_region_I_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_I_atom_list = df_atom_number.values.tolist()
mm_region_I_atoms = []
mm_region_I_atoms.extend(mm_region_I_atom_list)
mm_region_II_index_nested_list = []
for i in res_mm_region_II_list:
indices = np.where(df["residue_number"] == i)
# TODO: again, this is strange code
indices = list(indices)[0]
indices = list(indices)
mm_region_II_index_nested_list.append(indices)
mm_region_II_index_list = list(
itertools.chain.from_iterable(mm_region_II_index_nested_list)
)
df_atom = df.iloc[mm_region_II_index_list]
df_atom_number = df_atom["atom_number"]
mm_region_II_atom_list = df_atom_number.values.tolist()
mm_region_II_atoms = []
mm_region_II_atoms.extend(mm_region_II_atom_list)
ppdb.read_pdb(self.host_mm_pdb)
len_atoms = []
for i in range(len(ppdb.df["ATOM"])):
len_atoms.append(i + 1)
assert len(mm_region_I_atoms) + len(mm_region_II_atoms) == len(len_atoms),\
"Sum of the atoms in the selected and non-selected region "\
"does not equal the length of list of total atoms."
np.savetxt(self.host_mm_region_I_atoms, mm_region_I_atoms, fmt="%i")
np.savetxt(self.host_mm_region_II_atoms, mm_region_II_atoms, fmt="%i")
def save_host_mm_regions_pdbs(self):
"""
Saves a PDB file for the receptor's MM region preceding
the QM region and saves another PDB file for the receptor's
        MM region following the QM region.
"""
mm_region_I_atoms = np.loadtxt(self.host_mm_region_I_atoms)
mm_region_I_atoms = [int(i) for i in mm_region_I_atoms]
mm_region_II_atoms = np.loadtxt(self.host_mm_region_II_atoms)
mm_region_II_atoms = [int(i) for i in mm_region_II_atoms]
# NOTE: this is a slightly confusing way to define the atoms to
# write to a PDB - the members that are *not* in a section, rather
# than the members that are.
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_II_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_I_pdb,
records=None,
gz=False,
append_newline=True,
)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_mm_pdb)
for i in mm_region_I_atoms:
ppdb.df["ATOM"] = ppdb.df["ATOM"][
ppdb.df["ATOM"]["atom_number"] != i
]
ppdb.to_pdb(
path=self.host_mm_region_II_pdb,
records=None,
gz=False,
append_newline=True,
)
def get_qm_mm_regions(self):
"""
Saves separate PDB files for the QM and MM regions.
        The QM region comprises the QM region of the receptor
        and the entire ligand, while the MM region comprises
        the receptor atoms not selected for the QM region.
"""
with open(self.host_qm_pdb) as f1, open(self.qm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
with open(self.guest_pdb) as f1, open(self.qm_pdb, "a") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
with open(self.host_mm_pdb) as f1, open(self.mm_pdb, "w") as f2:
for line in f1:
if "ATOM" in line:
f2.write(line)
f2.write("END")
class PrepareGaussianGuest:
"""
A class used to prepare the QM engine input file (Gaussian)
for the ligand and run QM calculations with appropriate
keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine. It then runs a QM calculation with the given
    basis set and functional. The checkpoint file is then converted to
    a formatted checkpoint file. Output files (.log, .chk, and .fchk)
    will then be used to extract the ligand's force field parameters.
...
Attributes
----------
charge : int, optional
Charge of the ligand.
multiplicity: int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb: str, optional
Ligand PDB file with atom numbers beginning from 1.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_out_file: str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_out_file: str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="OPT",
frequency="FREQ",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6)",
gauss_out_file="guest.out",
fchk_out_file="guest_fchk.out",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.gauss_out_file = gauss_out_file
self.fchk_out_file = fchk_out_file
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
def write_input(self):
"""
Writes a Gaussian input file for the ligand.
"""
command_line_1 = "%Chk = " + self.guest_pdb[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = self.guest_pdb[:-4] + " " + "gaussian input file"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_2 = df[["x_coord", "y_coord", "z_coord"]]
df_merged = pd.concat([df_1, df_2], axis=1)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.guest_pdb[:-4] + ".com", "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand locally.
"""
execute_command = (
"g16"
+ " < "
+ self.guest_pdb[:-4]
+ ".com"
+ " > "
+ self.guest_pdb[:-4]
+ ".log"
)
with open(self.gauss_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.guest_pdb[:-4]
+ ".chk"
+ " "
+ self.guest_pdb[:-4]
+ ".fchk"
)
with open(self.fchk_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
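# Added usage sketch (not part of the original module): the three methods are
# intended to be run back to back; this requires working g16 and formchk
# executables on the path.
def _demo_prepare_gaussian_guest():
    gauss = PrepareGaussianGuest(
        charge=0, multiplicity=1, guest_pdb="guest_init_II.pdb"
    )
    gauss.write_input()
    gauss.run_gaussian()
    gauss.get_fchk()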
class PrepareGaussianHostGuest:
"""
A class used to prepare the QM engine input file (Gaussian) for
the receptor - ligand complex and run the QM calculations with
the appropriate keywords.
    This class contains methods to write an input file (.com extension)
    for the QM engine for the receptor - ligand complex. It then runs
    a QM calculation with the given basis set and functional. The
    checkpoint file is then converted to a formatted checkpoint file.
    Output files (.log, .chk, and .fchk) will then be used to extract
    charges for the ligand and the receptor.
...
Attributes
----------
charge : int, optional
Total charge of the receptor - ligand complex.
multiplicity : int, optional
Spin Multiplicity (2S+1) of the ligand where S represents
the total spin of the ligand.
guest_pdb : str, optional
Ligand PDB file with atom numbers beginning from 1.
host_qm_pdb : str, optional
PDB file for the receptor's QM region.
n_processors : int, optional
Number of processors to be used for Gaussian program to run and
set in %NProcShared command of Gaussian.
memory : int, optional
Memory (in GB) to be used set in %Mem command of Gaussian.
functional: str, optional
Exchange/Correlation or hybrid functional to use in the Gaussian
QM calculation.
basis_set: str, optional
Basis set to use for the Gaussian QM calculation.
optimisation: str, optional
set to "OPT" to perform a geometry optimization on the ligand
specified in the system; else set to an empty string.
frequency: str, optional
set to "FREQ" for Gaussian to perform a frequency calculation;
else set to an empty string.
add_keywords_I: str, optional
Specifies the integration grid.
add_keywords_II: str, optional
Specifies the QM engine to select one of the methods for
analyzing the electron density of the system. Methods used
are based on fitting the molecular electrostatic potential.
Methods used are : POP=CHELPG (Charges from Electrostatic
Potentials using a Grid based method) and POP=MK
(Merz-Singh-Kollman scheme)
add_keywords_III: str, optional
Used to include the IOp keyword (to set the internal options to
specific values) in the Gaussian command.
gauss_system_out_file : str, optional
This file contains the output script obtained after running
the Gaussian QM calculation.
fchk_system_out_file : str, optional
Formatted checkpoint file obtained from the checkpoint file
using formchk command.
host_guest_input : str, optional
Gaussian input file (.com extension) for the receptor - ligand
QM region.
qm_guest_charge_parameter_file : str, optional
File containing the charges of ligand atoms and their corresponding
        atoms. Charges obtained are the polarized charges due to the
        surrounding receptor region.
qm_host_charge_parameter_file : str, optional
File containing the charges of the QM region of the receptor.
qm_guest_atom_charge_parameter_file : str, optional
        File containing the charges of ligand atoms. Charges obtained
        are the polarized charges due to the surrounding receptor region.
"""
def __init__(
self,
charge=0,
multiplicity=1,
guest_pdb="guest_init_II.pdb",
host_qm_pdb="host_qm.pdb",
n_processors=12,
memory=50,
functional="B3LYP",
basis_set="6-31G",
optimisation="",
frequency="",
add_keywords_I="INTEGRAL=(GRID=ULTRAFINE)",
add_keywords_II="POP(MK,READRADII)",
add_keywords_III="IOP(6/33=2,6/42=6) SCRF=PCM",
gauss_system_out_file="system_qm.out",
fchk_system_out_file="system_qm_fchk.out",
host_guest_input="host_guest.com",
qm_guest_charge_parameter_file="guest_qm_surround_charges.txt",
qm_host_charge_parameter_file="host_qm_surround_charges.txt",
qm_guest_atom_charge_parameter_file="guest_qm_atom_surround_charges.txt",
):
self.charge = charge
self.multiplicity = multiplicity
self.guest_pdb = guest_pdb
self.host_qm_pdb = host_qm_pdb
self.n_processors = n_processors
self.memory = memory
self.functional = functional
self.basis_set = basis_set
self.optimisation = optimisation
self.frequency = frequency
self.add_keywords_I = add_keywords_I
self.add_keywords_II = add_keywords_II
self.add_keywords_III = add_keywords_III
self.gauss_system_out_file = gauss_system_out_file
self.fchk_system_out_file = fchk_system_out_file
self.host_guest_input = host_guest_input
self.qm_guest_charge_parameter_file = qm_guest_charge_parameter_file
self.qm_host_charge_parameter_file = qm_host_charge_parameter_file
self.qm_guest_atom_charge_parameter_file = (
qm_guest_atom_charge_parameter_file
)
def write_input(self):
"""
Writes a Gaussian input file for the receptor - ligand QM region.
"""
command_line_1 = "%Chk = " + self.host_guest_input[:-4] + ".chk"
command_line_2 = "%Mem = " + str(self.memory) + "GB"
command_line_3 = "%NProcShared = " + str(self.n_processors)
command_line_4 = (
"# "
+ self.functional
+ " "
+ self.basis_set
+ " "
+ self.optimisation
+ " "
+ self.frequency
+ " "
+ self.add_keywords_I
+ " "
+ self.add_keywords_II
+ " "
+ self.add_keywords_III
)
command_line_5 = " "
command_line_6 = "Gaussian Input File"
command_line_7 = " "
command_line_8 = str(self.charge) + " " + str(self.multiplicity)
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_1 = pd.concat([df_1, df_2, df_3], axis=1)
ppdb = PandasPdb()
ppdb.read_pdb(self.host_qm_pdb)
df = ppdb.df["ATOM"]
df_1 = ppdb.df["ATOM"]["element_symbol"]
df_1.columns = ["atom"]
df_3 = df[["x_coord", "y_coord", "z_coord"]]
df_2 = pd.Series(["0"] * len(df), name="decide_freeze")
df_merged_2 = pd.concat([df_1, df_2, df_3], axis=1)
df_merged = pd.concat([df_merged_1, df_merged_2], axis=0)
command_line_9 = df_merged.to_string(header=False, index=False)
command_line_10 = " "
command = [
command_line_1,
command_line_2,
command_line_3,
command_line_4,
command_line_5,
command_line_6,
command_line_7,
command_line_8,
command_line_9,
command_line_10,
]
commands = "\n".join(command)
with open(self.host_guest_input, "w") as f:
f.write(commands)
def run_gaussian(self):
"""
Runs the Gaussian QM calculation for the ligand - receptor region
locally.
"""
execute_command = (
"g16"
+ " < "
+ self.host_guest_input
+ " > "
+ self.host_guest_input[:-4]
+ ".log"
)
with open(self.gauss_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_fchk(self):
"""
Converts the Gaussian checkpoint file (.chk) to a formatted checkpoint
file (.fchk).
"""
execute_command = (
"formchk"
+ " "
+ self.host_guest_input[:-4]
+ ".chk"
+ " "
+ self.host_guest_input[:-4]
+ ".fchk"
)
with open(self.fchk_system_out_file, "w+") as f:
sp.run(
execute_command, shell=True, stdout=f, stderr=sp.STDOUT,
)
def get_qm_host_guest_charges(self):
"""
Extract charge information for the receptor - ligand QM region.
"""
log_file = self.host_guest_input[:-4] + ".log"
with open(log_file, "r") as f:
lines = f.readlines()
for i in range(len(lines)):
if "Fitting point charges to electrostatic potential" in lines[i]:
to_begin = int(i)
if " Sum of ESP charges =" in lines[i]:
to_end = int(i)
# Why + 4?
charges = lines[to_begin + 4 : to_end]
charge_list = []
for i in range(len(charges)):
charge_list.append(charges[i].strip().split())
charge_list_value = []
atom_list = []
for i in range(len(charge_list)):
charge_list_value.append(charge_list[i][2])
atom_list.append(charge_list[i][1])
ppdb = PandasPdb()
ppdb.read_pdb(self.guest_pdb)
df_guest = ppdb.df["ATOM"]
number_guest_atoms = df_guest.shape[0]
data_tuples = list(zip(atom_list, charge_list_value))
df_charge = | pd.DataFrame(data_tuples, columns=["Atom", "Charge"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
"""
analyze and plot results of experiments
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sb
import yaml
#E2: How large can I make my output domain without losing skill?
E2_results = pd.read_csv('param_optimization/E2_results_t2m_34_t2m.csv',sep =';')
#E1:
E1_results = pd.read_csv('param_optimization/E1_results_t2m_34_t2m.csv',sep =';')
#E1 label smoothing
E1_smooth_results = pd.read_csv('param_optimization/E1_label_smoothing_results_t2m_34_t2m_14_10_2021.csv',sep =';')
#E1 refined
E1_ref_results= pd.read_csv('param_optimization/E1_refined_results_t2m_34_t2m_ls0.4.csv',sep =';')
E1_ref_add = pd.read_csv('param_optimization/E1_label_smoothing_results_t2m_34_t2m_14_10_2021.csv',sep =';')
E1_ref_add = E1_ref_add.where(E1_ref_add.label_smoothing == 0.4).dropna()
E1_ref_results = pd.concat([E1_ref_results, E1_ref_add])
E1_ref_results.reset_index(inplace = True)
E1_ref_results_06 = pd.read_csv('param_optimization/E1_refined_results_t2m_34_t2m_0.6.csv',sep =';')
E1_ref_results_06 = pd.concat([E1_ref_results_06, E1_ref_add])
E1_ref_results_06.reset_index(inplace = True)
#E4
E4_results = pd.read_csv('param_optimization/E4_results_t2m_34_t2m_all.csv',sep =';')
#E3
E3_results01 = pd.read_csv('param_optimization/E3_results_t2m_34_t2m_folds_0_1.csv',sep =';')
E3_results25 = | pd.read_csv('param_optimization/E3_results_t2m_34_t2m_folds_2_5.csv',sep =';') | pandas.read_csv |
""" Exploratory data analysis EDA on insta-cart data-set.
"""
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
import seaborn as sns
def load_data():
""" Loads the data.
:return: dictionary of DataFrames with file names as keys.
"""
data_in_dict = dict()
data_path = Path.cwd().parent.joinpath('data')
for file in list(sorted(data_path.rglob('*.csv'))):
data_in_dict[file.stem] = | pd.read_csv(file) | pandas.read_csv |
import pytest
import pandas as pd
from pandas import compat
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import assert_frame_equal, assert_raises_regex
COMPRESSION_TYPES = [None, 'bz2', 'gzip',
pytest.param('xz', marks=td.skip_if_no_lzma)]
def decompress_file(path, compression):
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.open(path, 'rb')
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
result = f.read().decode('utf8')
f.close()
return result
@pytest.mark.parametrize('compression', COMPRESSION_TYPES)
def test_compression_roundtrip(compression):
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
df.to_json(path, compression=compression)
assert_frame_equal(df, pd.read_json(path, compression=compression))
# explicitly ensure file was compressed.
uncompressed_content = decompress_file(path, compression)
assert_frame_equal(df, pd.read_json(uncompressed_content))
def test_compress_zip_value_error():
df = pd.DataFrame([[0.123456, 0.234567, 0.567567],
[12.32112, 123123.2, 321321.2]],
index=['A', 'B'], columns=['X', 'Y', 'Z'])
with tm.ensure_clean() as path:
import zipfile
pytest.raises(zipfile.BadZipfile, df.to_json, path, compression="zip")
def test_read_zipped_json():
uncompressed_path = tm.get_data_path("tsframe_v012.json")
uncompressed_df = | pd.read_json(uncompressed_path) | pandas.read_json |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy as np
import datetime
import time
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose
from pyramid.arima import auto_arima
from statsmodels.tsa.arima_model import ARIMA
from datetime import timedelta
import statsmodels.api as sm
from main.common.dao.mongodb_utils import SingleMongodb
from main.common import log_utils, exception_utils, configration
LOG = log_utils.getLogger(module_name=__name__)
class Offline:
def __init__(self, offline_config: dict):
"""
        Constructor: parses the configuration from the config dict.
        :param offline_config: offline configuration
"""
        # Input configuration
input_properties = exception_utils.nullException(offline_config, "input_config")
self.inputDatabase = exception_utils.nullException(input_properties, "database")
self.inputTable = exception_utils.nullException(input_properties, "table")
        # Output configuration
output_properties = exception_utils.nullException(offline_config, "output_config")
self.outputDatabase = exception_utils.nullException(output_properties, "database")
self.outputTable = exception_utils.nullException(output_properties, "table")
        # Algorithm parameter configuration
alg_argument = exception_utils.nullException(offline_config, "alg_argument")
self.interval = exception_utils.nullException(alg_argument, "interval")
self.startTs = exception_utils.nullException(alg_argument, "start_time")
self.did = exception_utils.nullException(alg_argument, "did")
#self.ts = self.get_data().iloc[:1144]
self.test_size = int(exception_utils.nullException(alg_argument, "test_size"))
#self.train_size = len(self.ts) - self.test_size
def get_data(self, ip):
portId = self.did[ip]
singleMongodb = SingleMongodb()
client = singleMongodb.getClient()
collection = singleMongodb.getCollection(self.inputDatabase, self.inputTable, client)
data = pd.DataFrame(columns=['ts', 'did', 'port', 'inOctets', 'outOctets'])
dataSet = collection.find({'did': "ip:"+ip, "ts": {"$gte": | pd.to_datetime(self.startTs) | pandas.to_datetime |
import sys
import os
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
plt.style.use("custom_standard")
# %%
def filter_mT_table(df, kd_up_lim, SE_upper_lim, kd_low_lim=0, drop_dup=True):
"""
filters existing masstitr table
filters out seqs containing * or X characters
removes seqs not matching xxxPxExxx motif
Parameters
----------
SE_upper_lim
Will filter data for SE values with SE <= `SE_upper_lim`
kd_up_lim
Will remove data outside range: Kd < `kd_up_lim`
kd_low_lim
Will remove data outside range: `kd_low_lim` < Kd
drop_dup : :obj:`bool`, optional
Whether or not to drop duplicate sequences
Returns
------
filtered dataframe
"""
# drop duplicates first just in case only a single duplicate makes the cutoff but the other(s) do not
# for example, if LNLPEESDW had 2 entries with diff Kd values with one of them > kd_up_lim and the other < kd_up_lim
if drop_dup:
df = df.drop_duplicates(subset="AA_seq", keep=False)
df = df[~df.AA_seq.str.contains("[\*X]")]
df = df[df.AA_seq.str.contains(r"...P.E...")]
df = df[df["SE"] <= SE_upper_lim]
# df = df[df["R2"] >= R2_lower_lim]
df = df[(df["Kd"] < kd_up_lim) & (df["Kd"] > kd_low_lim)]
return df
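# Illustrative usage sketch (not called anywhere): how filter_mT_table behaves on a tiny
# hand-made table. The column names (AA_seq, SE, Kd) are the ones the function uses; the
# sequences and numeric values below are invented for demonstration only.
def _example_filter_mT_table():
    toy = pd.DataFrame({
        "AA_seq": ["LNLPEESDW",   # matches the xxxPxExxx motif -> kept
                   "LNLAEESDW",   # lacks the motif -> dropped
                   "LNLPEESDX"],  # contains an X -> dropped
        "Kd": [5.0, 6.0, 7.0],
        "SE": [0.1, 0.1, 0.1],
    })
    # Only the first row survives the motif, SE and Kd filters.
    return filter_mT_table(toy, kd_up_lim=10, SE_upper_lim=0.5)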
def correlation(df1, df2, plots=False):
"""
correlation plot and calculation between masstitr experiments
correltaion between columns labelled `Kd`.
Parameters
----------
df1
first dataframe
df2
second dataframe
Returns
-------
p_r
pearson correlation R value
s_r
spearman correlation R value
n_points
number of points in correlation
"""
df = pd.merge(df1, df2, on='AA_seq', suffixes=('_1','_2'))
df = df.reset_index(drop=True)
if plots:
fig, ax = plt.subplots(figsize=[4, 4])
df.plot.scatter(x='Kd_1', y='Kd_2', loglog=True, ax=ax)
fig, ax = plt.subplots(figsize=[4, 4])
df.plot.scatter(x='Kd_1', y='Kd_2', ax=ax)
p_r = df.corr("pearson").loc['Kd_1','Kd_2']
s_r = df.corr("spearman").loc['Kd_1','Kd_2']
n_points = len(df)
return p_r, s_r, n_points
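# Illustrative usage sketch (not called anywhere): correlating two toy masstitr tables.
# The column names follow the function above; the Kd values are invented. Note that the
# correlation is computed only over sequences shared by both tables.
def _example_correlation():
    rep1 = pd.DataFrame({"AA_seq": ["AAA", "BBB", "CCC"], "Kd": [1.0, 2.0, 4.0]})
    rep2 = pd.DataFrame({"AA_seq": ["AAA", "BBB", "CCC"], "Kd": [1.1, 2.2, 3.9]})
    p_r, s_r, n_points = correlation(rep1, rep2)   # Pearson R, Spearman R, n shared seqs
    return p_r, s_r, n_points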
def correlation_screen(df1, df2, p_fit_err_cutoffs, kd_cutoffs):
p_mat = | pd.DataFrame(columns=p_fit_err_cutoffs, index=kd_cutoffs) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ======================================================================================================================== #
# Project : Explainable Recommendation (XRec) #
# Version : 0.1.0 #
# File : \criteo.py #
# Language : Python 3.8 #
# ------------------------------------------------------------------------------------------------------------------------ #
# Author : <NAME> #
# Email : <EMAIL> #
# URL : https://github.com/john-james-ai/xrec #
# ------------------------------------------------------------------------------------------------------------------------ #
# Created : Sunday, December 26th 2021, 3:56:00 pm #
# Modified : Friday, January 14th 2022, 6:46:32 pm #
# Modifier : <NAME> (<EMAIL>) #
# ------------------------------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2021 Bryant St. Labs #
# ======================================================================================================================== #
from abc import ABC, abstractmethod
import os
import pandas as pd
import numpy as np
import logging
import math
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from cvr.utils.printing import Printer
# ------------------------------------------------------------------------------------------------------------------------ #
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------------------------------------------------ #
DEFAULT_N_JOBS = 18
# ======================================================================================================================== #
# OUTLIER DETECTION #
# ======================================================================================================================== #
class OutlierDetector:
"""Outlier detection with selected outlier detection algorithms.
Args:
criterion (str): Indicates criterion for final determination of an observation, given results
from various outlier detection algorithms. Values include 'any', 'all', 'vote' for
majority vote.
numeric_algorithms(dict): Dictionary of instantiated numeric outlier detection algorithms
categorical_algorithms(dict): Dictionary of instantiated categorical outlier detection algorithms
random_state (int): Pseudo random generator seed for Isolation Forest
Attributes:
        results_: Nested dictionary holding the numeric, categorical and combined outlier labels
        summary_:
    Returns:
        Numpy array containing the outlier labels
"""
def __init__(
self,
criterion="vote",
numeric_algorithms: dict = None,
categorical_algorithms: dict = None,
random_state=None,
) -> None:
self._criterion = criterion
self._random_state = random_state
self.results_ = {}
self._n = None
# Numeric Outlier Detection Algorithms
self._numeric_algorithms = (
numeric_algorithms
if numeric_algorithms
else {
"Z-Score": OutlierZScore(),
"IQR": OutlierIQR(),
"Robust Covariance": OutlierEllipticEnvelope(random_state=random_state),
"Isolation Forest": OutlierIsolationForest(random_state=random_state),
"Local Outlier Factor": OutlierLocalOutlierFactor(),
}
)
# Categorical Outlier Detection Algorithms
self._categorical_algorithms = (
categorical_algorithms
if categorical_algorithms
else {
"Attribute Value Frequency": OutlierAVF(),
"Square of Complement Frequency": OutlierSCF(),
"Weighted Attribute Value Frequency": OutlierWAVF(),
}
)
# Algorithms for numeric and categorical (object) data outlier detection
self._detectors = {
"number": self._numeric_algorithms,
"object": self._categorical_algorithms,
}
def fit(self, X, y=None):
"""Fits several outlier detection algorithms.
Args:
X (pd.DataFrame): Input
"""
self._n = len(X)
labels_ensemble = {}
for datatype, algorithms in self._detectors.items():
labels_datatype = {}
X_datatype = X.select_dtypes(include=datatype)
for name, algorithm in algorithms.items():
name_datatype = name + " (" + datatype + ")"
print(
"Currently fitting outlier detector {}.".format(name_datatype),
end=" ",
)
algorithm.fit(X_datatype)
labels = algorithm.predict(X_datatype)
o = labels.sum()
p = round(o / self._n * 100, 2)
print("Detected {} outliers, {}% of the data.".format(str(o), str(p)))
labels_datatype[name] = labels
labels_ensemble[name_datatype] = labels
self.results_[datatype] = self._compute_results(labels_datatype, datatype)
# Combine results for numeric and categorical outlier labels
self.results_["ensemble"] = self._compute_results(labels_ensemble, "combined")
def predict(self, X) -> pd.DataFrame:
o = self.results_["ensemble"]["labels"].sum()
p = round(o / self._n * 100, 2)
print(
"\nThe ensemble detected {} outliers constituting {}% of the data using the {} criterion.".format(
str(o), str(p), str(self._criterion)
)
)
return self.results_["ensemble"]["labels"].to_frame().reset_index()
def _compute_results(self, labels: dict, datatype: str) -> dict:
"""Aggregates results for several outlier detection algorithms."""
d = {}
# Store labels by algorithm
d["labels_by_algorithm"] = pd.DataFrame.from_dict(labels, orient="columns")
# Store aggregated labels based upon the criteria
d["labels_any"] = d["labels_by_algorithm"].any(axis=1)
d["labels_all"] = d["labels_by_algorithm"].all(axis=1)
d["labels_vote"] = d["labels_by_algorithm"].mean(axis=1) > 0.5
# Store the labels according to the selected criterion
if self._criterion == "any":
d["labels"] = d["labels_any"]
elif self._criterion == "all":
d["labels"] = d["labels_all"]
else:
d["labels"] = d["labels_vote"]
# Update labels by algorithms to include the labels aggregated by the three criteria
all_desc = self._get_label_description(datatype, " (All)")
any_desc = self._get_label_description(datatype, " (Any)")
vote_desc = self._get_label_description(datatype, " (Majority Vote)")
ensemble_desc = self._get_label_description(datatype, "")
d["labels_by_algorithm"][all_desc] = d["labels_all"]
d["labels_by_algorithm"][any_desc] = d["labels_any"]
d["labels_by_algorithm"][vote_desc] = d["labels_vote"]
d["labels_by_algorithm"][ensemble_desc] = d["labels"]
# Aggregate the total counts for all algorithms for selected and criteria
d["summary"] = d["labels_by_algorithm"].sum()
return d
def _get_label_description(self, datatype: str, criterion: str) -> str:
if datatype == "number":
return "Numeric Ensemble" + criterion
elif datatype == "object":
return "Categorical Ensemble" + criterion
else:
return "Combined Ensemble" + criterion
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS Z-SCORE #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierZScore:
def __init__(self, threshold: int = 3) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Computes the zscores for a 2 dimensional array.
Args:
X (pd.DataFrame): Input
"""
# Convert dataframe to numpy array.
X = X.select_dtypes(include="number").values
z = stats.zscore(X)
labels = np.where(np.abs(z) > self._threshold, 1, 0)
self._labels = np.any(labels, axis=1)
def predict(self, X):
"""Returns the prediction
Args:
X (np.array): Input
"""
return self._labels
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS IQR #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierIQR:
def __init__(self, threshold: float = 1.5) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Computes the zscores for a 2 dimensional array.
X (pd.DataFrame): Input
"""
# Convert dataframe to numpy array.
X = X.select_dtypes(include="number").values
q1, q3 = np.percentile(a=X, q=[25, 75], axis=0)
iqr = q3 - q1
lower = q1 - (iqr * self._threshold)
upper = q3 + (iqr * self._threshold)
labels = np.where(np.greater(X, upper) | np.less(X, lower), 1, 0)
self._labels = np.any(labels, axis=1)
def predict(self, X) -> np.array:
return self._labels
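# Illustrative usage sketch (not called anywhere): the IQR detector on a tiny numeric
# frame. The column name and values are invented; the one extreme value is flagged.
def _example_outlier_iqr():
    toy = pd.DataFrame({"value": [1.0, 2.0, 3.0, 4.0, 100.0]})
    detector = OutlierIQR()          # default threshold: 1.5 * IQR beyond the quartiles
    detector.fit(toy)
    return detector.predict(toy)     # [False, False, False, False, True]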
# ======================================================================================================================== #
# SKLEARN OUTLIER DETECTOR WRAPPERS #
# ======================================================================================================================== #
class OutliersSKLearn(ABC):
"""Abstract base class for sklearn outlier detectors wrappers.
The SKLearn classifiers cannot handle NaNs. Hence, NaNs were replaced as follows:
- Numeric variables replace NaNs with the mean.
- Categorical variables replace NaNs with -1
"""
def __init__(
self,
contamination: float = None,
n_jobs: int = DEFAULT_N_JOBS,
random_state: int = None,
**kwargs
) -> None:
self._contamination = contamination
self._n_jobs = n_jobs
self._random_state = random_state
self._clf = self.get_clf()
@abstractmethod
def get_clf(self) -> None:
pass
def fit(self, X: pd.DataFrame, y: np.ndarray = None) -> None:
X = X.select_dtypes(include="number")
X = self._impute(X).values
self._clf.fit(X)
def predict(self, X: pd.DataFrame) -> np.ndarray:
X = X.select_dtypes(include="number")
X = self._impute(X).values
labels = self._clf.predict(X)
return np.where(labels == -1, 1, 0)
def _impute(self, X) -> pd.DataFrame:
"""Imputes missing numerics with their means and missing categoricals with '-1'"""
imputer = {
"sale": 0,
"sales_amount": X["sales_amount"].mean(),
"conversion_time_delay": X["conversion_time_delay"].mean(),
"click_ts": X["click_ts"].mean(),
"n_clicks_1week": X["n_clicks_1week"].mean(),
"product_price": X["product_price"].mean(),
"product_age_group": "-1",
"device_type": "-1",
"audience_id": "-1",
"product_gender": "-1",
"product_brand": "-1",
"product_category_1": "-1",
"product_category_2": "-1",
"product_category_3": "-1",
"product_category_4": "-1",
"product_category_5": "-1",
"product_category_6": "-1",
"product_category_7": "-1",
"product_country": "-1",
"product_id": "-1",
"product_title": "-1",
"partner_id": "-1",
"user_id": "-1",
}
X.fillna(value=imputer, inplace=True)
return X
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ELLIPTIC ENVELOPE #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierEllipticEnvelope(OutliersSKLearn):
"""Wrapper for sklearn's Elliptic Envelope class which accepts dataframes as input.
Args:
support_fraction (float): The proportion of points to be included in the support of the raw MCD estimate. If None, the minimum value of support_fraction will be used within the algorithm: [n_sample + n_features + 1] / 2. Range is (0, 1). Default is None.
contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Range is (0, 0.5]. Default is 0.1
random_state (int): Pseudo random generator seed. Default is None.
"""
def __init__(
self,
support_fraction: float = 0.6,
contamination: float = 0.1,
random_state: int = None,
) -> None:
self._support_fraction = support_fraction
super(OutlierEllipticEnvelope, self).__init__(
contamination=contamination, random_state=random_state
)
def get_clf(self):
return EllipticEnvelope(
support_fraction=self._support_fraction,
contamination=self._contamination,
random_state=self._random_state,
)
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ISOLATION FOREST #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierIsolationForest(OutliersSKLearn):
"""Wrapper for sklearn's Isolation Forest class which accepts dataframes as input.
Args:
contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in
the data set. Range is (0, 0.5]. Default is 0.1
n_jobs (int). The number of jobs to run in parallel.
random_state (int): Pseudo random generator seed. Default is None.
"""
def __init__(
self,
contamination="auto",
n_jobs: int = DEFAULT_N_JOBS,
random_state: int = None,
) -> None:
super(OutlierIsolationForest, self).__init__(
contamination=contamination, n_jobs=n_jobs, random_state=random_state
)
def get_clf(self):
return IsolationForest(
contamination=self._contamination,
n_jobs=self._n_jobs,
random_state=self._random_state,
)
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ISOLATION FOREST #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierLocalOutlierFactor(OutliersSKLearn):
"""Wrapper for sklearn's Local Outlier Factor class which accepts dataframes as input.
Args:
contamination (float): The amount of contamination of the data set, i.e. the proportion of outliers in
the data set. Range is (0, 0.5]. Default is 0.1
n_jobs (int). The number of jobs to run in parallel.
random_state (int): Pseudo random generator seed. Default is None.
"""
def __init__(self, contamination="auto", n_jobs: int = DEFAULT_N_JOBS) -> None:
super(OutlierLocalOutlierFactor, self).__init__(
contamination=contamination, n_jobs=n_jobs
)
def get_clf(self):
return LocalOutlierFactor(
contamination=self._contamination, n_jobs=self._n_jobs
)
def predict(self, X: pd.DataFrame) -> None:
X = X.select_dtypes(include="number")
X = self._impute(X).values
labels = self._clf.fit_predict(X)
return np.where(labels == -1, 1, 0)
# ======================================================================================================================== #
# OUTLIER CATEGORICAL ANALYSIS #
# ======================================================================================================================== #
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS ATTRIBUTE VALUE FREQUENCY #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierAVF:
"""Detects outliers using the Attribute Value Frequency method.
Args:
        threshold (float): Fraction of observations to flag; the k = ceil(n * threshold)
            rows with the lowest AVF scores are treated as outliers.
"""
def __init__(self, threshold: float = 0.1) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Fits the model
X (pd.DataFrame): Input
"""
X = X.select_dtypes(include="object")
df = pd.DataFrame()
        # Iterate over the columns and build a dataframe containing the frequency of each value.
for col in X.columns:
# Create a one column dataframe
df1 = X[col].to_frame()
# Compute value counts and convert series to frame
df2 = df1.value_counts().to_frame().reset_index()
df2.columns = ["value", "count"]
            # Merge the two dataframes, take the frequency column, and add it to the new dataframe
merged = pd.merge(df1, df2, how="left", left_on=col, right_on="value")
df[col] = merged["count"]
        # Flag the k = ceil(n * threshold) observations with the lowest AVF scores:
        # the cutoff is the largest AVF value among those k observations, and rows
        # scoring below it are labelled as outliers.
avf = df.mean(axis=1)
n = len(df)
k = math.ceil(n * self._threshold)
threshold = avf.sort_values().head(k).max()
self._labels = avf < threshold
def predict(self, X) -> np.array:
# Convert the dataframe to a numpy array to comport with the other estimators.
return self._labels.values
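# Conceptual sketch (not called anywhere) of the AVF scoring idea implemented in fit()
# above: each categorical value is replaced by its frequency within its column, and the
# per-row mean of those frequencies is the AVF score, so rows built from rare values
# score low. The toy column names and values are invented.
def _example_avf_scores():
    toy = pd.DataFrame({"color": ["red", "red", "red", "blue"],
                        "size": ["L", "L", "L", "S"]})
    freqs = pd.DataFrame({col: toy[col].map(toy[col].value_counts()) for col in toy})
    return freqs.mean(axis=1)   # last row scores 1.0, the others 3.0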
# ------------------------------------------------------------------------------------------------------------------------ #
# OUTLIER ANALYSIS WEIGHTED ATTRIBUTE VALUE FREQUENCY #
# ------------------------------------------------------------------------------------------------------------------------ #
class OutlierWAVF:
"""Detects outliers using the Weighted Attribute Value Frequency method.
Args:
        threshold (float): Fraction of observations to flag; the rows with the lowest
            WAVF scores are treated as outliers.
"""
def __init__(self, threshold: float = 0.1) -> None:
self._threshold = threshold
self._labels = None
def fit(self, X, y=None) -> None:
"""Fits the model
X (pd.DataFrame): Input
"""
X = X.select_dtypes(include="object")
weights = self._compute_weights(X)
df = pd.DataFrame()
        # Iterate over the columns and build a dataframe containing the frequency of each value.
for col in X.columns:
# Create a one column dataframe
df1 = X[col].to_frame()
# Compute value counts and convert series to frame
df2 = df1.value_counts().to_frame().reset_index()
df2.columns = ["value", "count"]
            # Merge the two dataframes, take the frequency column, and add it to the new dataframe
merged = | pd.merge(df1, df2, how="left", left_on=col, right_on="value") | pandas.merge |
#!/usr/bin/env python
# coding: utf-8
# In[24]:
import numpy
import pandas as pd
import tensorflow as tf
from PyEMD import CEEMDAN
import warnings
warnings.filterwarnings("ignore")
### import the libraries
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from math import sqrt
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY)
def percentage_error(actual, predicted):
res = numpy.empty(actual.shape)
for j in range(actual.shape[0]):
if actual[j] != 0:
res[j] = (actual[j] - predicted[j]) / actual[j]
else:
            res[j] = predicted[j] / numpy.mean(actual)  # numpy (not np) is the alias imported above
return res
def mean_absolute_percentage_error(y_true, y_pred):
return numpy.mean(numpy.abs(percentage_error(numpy.asarray(y_true), numpy.asarray(y_pred)))) * 100
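# Illustrative sketch (not called anywhere) of the two helpers above. create_dataset
# expects a 2-D (n, 1) array and returns lagged input windows plus one-step-ahead
# targets; mean_absolute_percentage_error is expressed in percent. Values are invented.
def _example_helpers():
    series = numpy.arange(10).reshape(-1, 1)          # 0, 1, ..., 9 as a column vector
    X, y = create_dataset(series, look_back=3)        # X[0] == [0, 1, 2], y[0] == 3
    mape = mean_absolute_percentage_error([100.0, 200.0], [110.0, 180.0])   # == 10.0
    return X, y, mape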
# In[25]:
def lr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import tensorflow as tf
numpy.random.seed(1234)
tf.random.set_seed(1234)
from sklearn.linear_model import LinearRegression
grid = LinearRegression()
grid.fit(X,y)
y_pred_train_lr= grid.predict(X)
y_pred_test_lr= grid.predict(X1)
y_pred_train_lr=pd.DataFrame(y_pred_train_lr)
y_pred_test_lr=pd.DataFrame(y_pred_test_lr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_lr= sc_y.inverse_transform (y_pred_test_lr)
y_pred_train1_lr=sc_y.inverse_transform (y_pred_train_lr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_lr)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_lr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_lr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_lr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_lr)
return mape,rmse,mae
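# Illustrative usage sketch (not called anywhere): running the linear-regression baseline
# on a synthetic single-column series. The sine wave is invented data; any one-column
# DataFrame should work, since lr_model builds its own lagged features internally.
def _example_lr_model():
    wave = pd.DataFrame(numpy.sin(numpy.linspace(0, 20, 200)))
    mape, rmse, mae = lr_model(wave, look_back=3, data_partition=0.8)
    return mape, rmse, mae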
# In[26]:
def svr_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.svm import SVR
grid = SVR()
grid.fit(X,y)
y_pred_train_svr= grid.predict(X)
y_pred_test_svr= grid.predict(X1)
y_pred_train_svr=pd.DataFrame(y_pred_train_svr)
y_pred_test_svr=pd.DataFrame(y_pred_test_svr)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_svr= sc_y.inverse_transform (y_pred_test_svr)
y_pred_train1_svr=sc_y.inverse_transform (y_pred_train_svr)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_svr=pd.DataFrame(y_pred_test1_svr)
y_pred_train1_svr=pd.DataFrame(y_pred_train1_svr)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_svr)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_svr))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_svr)
return mape,rmse,mae
# In[27]:
def ann_model(datass,look_back,data_partition):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
trainX = numpy.reshape(X, (X.shape[0], 1, X.shape[1]))
testX = numpy.reshape(X1, (X1.shape[0], 1, X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.neural_network import MLPRegressor
model= MLPRegressor(random_state=1,activation='tanh').fit(X,y)
numpy.random.seed(1234)
# make predictions
y_pred_train = model.predict(X)
y_pred_test = model.predict(X1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y1=pd.DataFrame(y1)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y_test= sc_y.inverse_transform (y1)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[28]:
def rf_model(datass,look_back,data_partition,max_features):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train_rf= grid.predict(X)
y_pred_test_rf= grid.predict(X1)
y_pred_train_rf=pd.DataFrame(y_pred_train_rf)
y_pred_test_rf=pd.DataFrame(y_pred_test_rf)
y1=pd.DataFrame(y1)
y=pd.DataFrame(y)
y_pred_test1_rf= sc_y.inverse_transform (y_pred_test_rf)
y_pred_train1_rf=sc_y.inverse_transform (y_pred_train_rf)
y_test= sc_y.inverse_transform (y1)
y_train= sc_y.inverse_transform (y)
y_pred_test1_rf=pd.DataFrame(y_pred_test1_rf)
y_pred_train1_rf=pd.DataFrame(y_pred_train1_rf)
y_test= pd.DataFrame(y_test)
#summarize the fit of the model
mape=mean_absolute_percentage_error(y_test,y_pred_test1_rf)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1_rf))
mae=metrics.mean_absolute_error(y_test,y_pred_test1_rf)
return mape,rmse,mae
# In[29]:
def lstm_model(datass,look_back,data_partition,max_features,epoch,batch_size,neuron,lr,optimizer):
datasets=datass.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
trainX1 = numpy.reshape(X, (X.shape[0],1,X.shape[1]))
testX1 = numpy.reshape(X1, (X1.shape[0],1,X1.shape[1]))
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
neuron=neuron
model = Sequential()
model.add(LSTM(units = neuron,input_shape=(trainX1.shape[1], trainX1.shape[2])))
model.add(Dense(1))
optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(loss='mse',optimizer=optimizer)
# model.summary()
# Fitting the RNN to the Training s
model.fit(trainX1, y, epochs = epoch, batch_size = batch_size,verbose=0)
# make predictions
y_pred_train = model.predict(trainX1)
y_pred_test = model.predict(testX1)
y_pred_test= numpy.array(y_pred_test).ravel()
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_test1= sc_y.inverse_transform (y_pred_test)
y1=pd.DataFrame(y1)
y_test= sc_y.inverse_transform (y1)
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn import metrics
mape=mean_absolute_percentage_error(y_test,y_pred_test1)
rmse= sqrt(mean_squared_error(y_test,y_pred_test1))
mae=metrics.mean_absolute_error(y_test,y_pred_test1)
return mape,rmse,mae
# In[30]:
###################################################hybrid based ceemdan####################################################
def hybrid_ceemdan_rf(datass,look_back,data_partition,max_features):
import numpy as np
import pandas as pd
dfs=datass
s = dfs.values
emd = CEEMDAN(epsilon=0.05)
emd.noise_seed(12345)
IMFs = emd(s)
full_imf=pd.DataFrame(IMFs)
data_imf=full_imf.T
import pandas as pd
pred_test=[]
test_ori=[]
pred_train=[]
train_ori=[]
for col in data_imf:
datasetss2=pd.DataFrame(data_imf[col])
datasets=datasetss2.values
train_size = int(len(datasets) * data_partition)
test_size = len(datasets) - train_size
train, test = datasets[0:train_size], datasets[train_size:len(datasets)]
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
X_train=pd.DataFrame(trainX)
Y_train=pd.DataFrame(trainY)
X_test=pd.DataFrame(testX)
Y_test=pd.DataFrame(testY)
sc_X = StandardScaler()
sc_y = StandardScaler()
X= sc_X.fit_transform(X_train)
y= sc_y.fit_transform(Y_train)
X1= sc_X.fit_transform(X_test)
y1= sc_y.fit_transform(Y_test)
y=y.ravel()
y1=y1.ravel()
import numpy
numpy.random.seed(1234)
import tensorflow as tf
tf.random.set_seed(1234)
from sklearn.ensemble import RandomForestRegressor
grid = RandomForestRegressor(max_features=max_features)
grid.fit(X,y)
y_pred_train= grid.predict(X)
y_pred_test= grid.predict(X1)
y_pred_test=pd.DataFrame(y_pred_test)
y_pred_train=pd.DataFrame(y_pred_train)
y1=pd.DataFrame(y1)
y= | pd.DataFrame(y) | pandas.DataFrame |
"""
Computational Cancer Analysis Library
Authors:
Huwate (Kwat) Yeerna (Medetgul-Ernar)
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
<NAME>
<EMAIL>
Computational Cancer Analysis Laboratory, UCSD Cancer Center
"""
from os.path import isfile
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.cm import bwr, gist_rainbow
from matplotlib.colorbar import ColorbarBase, make_axes
from matplotlib.colors import (ColorConverter, LinearSegmentedColormap,
ListedColormap, Normalize)
from matplotlib.gridspec import GridSpec
from matplotlib.pyplot import (figure, gca, savefig, sca, subplot, suptitle,
tight_layout)
from numpy import array, unique
from pandas import DataFrame, Series, isnull
from seaborn import (barplot, boxplot, clustermap, despine, distplot, heatmap,
set_style, violinplot)
from .d2 import get_dendrogram_leaf_indices, normalize_2d_or_1d
from .file import establish_filepath
# ==============================================================================
# Style
# ==============================================================================
FIGURE_SIZE = (16, 10)
SPACING = 0.05
FONT_LARGEST = {'fontsize': 24, 'weight': 'bold', 'color': '#220530'}
FONT_LARGER = {'fontsize': 20, 'weight': 'bold', 'color': '#220530'}
FONT_STANDARD = {'fontsize': 16, 'weight': 'bold', 'color': '#220530'}
FONT_SMALLER = {'fontsize': 12, 'weight': 'bold', 'color': '#220530'}
# Color maps
C_BAD = 'wheat'
# Continuous 1
CMAP_CONTINUOUS = bwr
CMAP_CONTINUOUS.set_bad(C_BAD)
# Continuous 2
reds = [0.26, 0.26, 0.26, 0.39, 0.69, 1, 1, 1, 1, 1, 1]
greens_half = [0.26, 0.16, 0.09, 0.26, 0.69]
colordict = {
'red':
tuple([(0.1 * i, r, r) for i, r in enumerate(reds)]),
'green':
tuple([
(0.1 * i, r, r)
for i, r in enumerate(greens_half + [1] + list(reversed(greens_half)))
]),
'blue':
tuple([(0.1 * i, r, r) for i, r in enumerate(reversed(reds))])
}
CMAP_CONTINUOUS_ASSOCIATION = LinearSegmentedColormap('association', colordict)
CMAP_CONTINUOUS_ASSOCIATION.set_bad(C_BAD)
# Categorical
CMAP_CATEGORICAL = gist_rainbow
CMAP_CATEGORICAL.set_bad(C_BAD)
# Binary
CMAP_BINARY = ListedColormap(['#cdcdcd', '#404040'])
CMAP_BINARY.set_bad(C_BAD)
DPI = 300
# ==============================================================================
# Functions
# ==============================================================================
def plot_points(*args,
title='',
xlabel='',
ylabel='',
filepath=None,
file_extension='pdf',
dpi=DPI,
ax=None,
**kwargs):
"""
:param args:
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
if not ax:
figure(figsize=FIGURE_SIZE)
ax = gca()
if 'linestyle' not in kwargs:
kwargs['linestyle'] = ''
if 'marker' not in kwargs:
kwargs['marker'] = '.'
ax.plot(*args, **kwargs)
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_distribution(a,
bins=None,
hist=True,
kde=True,
rug=False,
fit=None,
hist_kws=None,
kde_kws=None,
rug_kws=None,
fit_kws=None,
color=None,
vertical=False,
norm_hist=False,
axlabel=None,
label=None,
ax=None,
title='',
xlabel='',
ylabel='Frequency',
filepath=None,
file_extension='pdf',
dpi=DPI):
"""
:param a:
:param bins:
:param hist:
:param kde:
:param rug:
:param fit:
:param hist_kws:
:param kde_kws:
:param rug_kws:
:param fit_kws:
:param color:
:param vertical:
:param norm_hist:
:param axlabel:
:param label:
:param ax:
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:return: None
"""
if not ax:
figure(figsize=FIGURE_SIZE)
distplot(
a,
bins=bins,
hist=hist,
kde=kde,
rug=rug,
fit=fit,
hist_kws=hist_kws,
kde_kws=kde_kws,
rug_kws=rug_kws,
fit_kws=fit_kws,
color=color,
vertical=vertical,
norm_hist=norm_hist,
axlabel=axlabel,
label=label,
ax=ax)
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_violin_box_or_bar(x=None,
y=None,
hue=None,
data=None,
order=None,
hue_order=None,
bw='scott',
cut=2,
scale='count',
scale_hue=True,
gridsize=100,
width=0.8,
inner='quartile',
split=False,
orient=None,
linewidth=None,
color=None,
palette=None,
saturation=0.75,
ax=None,
fliersize=5,
whis=1.5,
notch=False,
ci=95,
n_boot=1000,
units=None,
errcolor='0.26',
errwidth=None,
capsize=None,
violin_or_box='violin',
colors=(),
figure_size=FIGURE_SIZE,
title=None,
xlabel=None,
ylabel=None,
filepath=None,
file_extension='pdf',
dpi=DPI,
**kwargs):
"""
    Plot a violin, box, or bar plot (a bar plot is used automatically for binary targets).
:param x:
:param y:
:param hue:
:param data:
:param order:
:param hue_order:
:param bw:
:param cut:
:param scale:
:param scale_hue:
:param gridsize:
:param width:
:param inner:
:param split:
:param orient:
:param linewidth:
:param color:
:param palette:
:param saturation:
:param ax:
:param fliersize:
:param whis:
:param notch:
:param ci:
:param n_boot:
:param units:
:param errcolor:
:param errwidth:
:param capsize:
:param violin_or_box:
:param colors: iterable;
:param figure_size: tuple;
:param title:
:param xlabel:
:param ylabel:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
# Initialize a figure
if not ax:
figure(figsize=figure_size)
if isinstance(x, str):
x = data[x]
if isinstance(y, str):
y = data[y]
if not palette:
palette = assign_colors_to_states(x, colors=colors)
    if len(set([v for v in y
                if v and not isnull(v)])) <= 2:  # Use barplot for binary
barplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
ci=ci,
n_boot=n_boot,
units=units,
orient=orient,
color=color,
palette=palette,
saturation=saturation,
errcolor=errcolor,
ax=ax,
errwidth=errwidth,
capsize=capsize,
**kwargs)
else: # Use violin or box plot for continuous or categorical
if violin_or_box == 'violin':
violinplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
bw=bw,
cut=cut,
scale=scale,
scale_hue=scale_hue,
gridsize=gridsize,
width=width,
inner=inner,
split=split,
orient=orient,
linewidth=linewidth,
color=color,
palette=palette,
saturation=saturation,
ax=ax,
**kwargs)
elif violin_or_box == 'box':
boxplot(
x=x,
y=y,
hue=hue,
data=data,
order=order,
hue_order=hue_order,
orient=orient,
color=color,
palette=palette,
saturation=saturation,
width=width,
fliersize=fliersize,
linewidth=linewidth,
whis=whis,
notch=notch,
ax=ax,
**kwargs)
else:
raise ValueError(
'\'violin_or_box\' must be either \'violin\' or \'box\'.')
decorate(style='ticks', title=title, xlabel=xlabel, ylabel=ylabel)
if filepath:
save_plot(filepath, file_extension=file_extension, dpi=dpi)
def plot_heatmap(dataframe,
vmin=None,
vmax=None,
cmap=None,
center=None,
robust=False,
annot=None,
fmt='.2g',
annot_kws=None,
linewidths=0,
linecolor='white',
cbar=False,
cbar_kws=None,
cbar_ax=None,
square=False,
xticklabels=False,
yticklabels=False,
mask=None,
figure_size=FIGURE_SIZE,
data_type='continuous',
normalization_method=None,
normalization_axis=0,
max_std=3,
axis_to_sort=None,
cluster=False,
row_annotation=(),
column_annotation=(),
annotation_colors=(),
title=None,
xlabel=None,
ylabel=None,
xlabel_rotation=0,
ylabel_rotation=90,
xtick_rotation=90,
ytick_rotation=0,
filepath=None,
file_extension='pdf',
dpi=DPI,
**kwargs):
"""
Plot heatmap.
:param dataframe:
:param vmin:
:param vmax:
:param cmap:
:param center:
:param robust:
:param annot:
:param fmt:
:param annot_kws:
:param linewidths:
:param linecolor:
:param cbar:
:param cbar_kws:
:param cbar_ax:
:param square:
:param xticklabels:
:param yticklabels:
:param mask:
:param figure_size:
:param data_type:
:param normalization_method:
:param normalization_axis:
:param max_std:
:param axis_to_sort:
:param cluster:
:param row_annotation:
:param column_annotation:
:param annotation_colors: list; a list of matplotlib color specifications
:param title:
:param xlabel:
:param ylabel:
:param xlabel_rotation:
:param ylabel_rotation:
:param xtick_rotation:
:param ytick_rotation:
:param filepath:
:param file_extension:
:param dpi:
:param kwargs:
:return: None
"""
df = dataframe.copy()
if normalization_method:
df = normalize_2d_or_1d(
df, normalization_method,
axis=normalization_axis).clip(-max_std, max_std)
if len(row_annotation) or len(column_annotation):
if len(row_annotation):
if isinstance(row_annotation, Series):
row_annotation = row_annotation.copy()
if not len(row_annotation.index & df.index): # Series
# but without proper index
row_annotation.index = df.index
else:
row_annotation = Series(row_annotation, index=df.index)
row_annotation.sort_values(inplace=True)
df = df.ix[row_annotation.index, :]
if len(column_annotation):
if isinstance(column_annotation, Series):
column_annotation = column_annotation.copy()
# Series but without proper index
if not len(column_annotation.index & df.columns):
column_annotation.index = df.columns
else:
column_annotation = Series(column_annotation, index=df.columns)
column_annotation.sort_values(inplace=True)
df = df.ix[:, column_annotation.index]
if axis_to_sort in (0, 1):
a = array(df)
a.sort(axis=axis_to_sort)
df = DataFrame(a, index=df.index)
elif cluster:
row_indices, column_indices = get_dendrogram_leaf_indices(dataframe)
df = df.iloc[row_indices, column_indices]
if isinstance(row_annotation, Series):
row_annotation = row_annotation.iloc[row_indices]
if isinstance(column_annotation, Series):
column_annotation = column_annotation.iloc[column_indices]
figure(figsize=figure_size)
gridspec = GridSpec(10, 10)
ax_top = subplot(gridspec[0:1, 2:-2])
ax_center = subplot(gridspec[1:8, 2:-2])
ax_bottom = subplot(gridspec[8:10, 2:-2])
ax_left = subplot(gridspec[1:8, 1:2])
ax_right = subplot(gridspec[1:8, 8:9])
ax_top.axis('off')
ax_bottom.axis('off')
ax_left.axis('off')
ax_right.axis('off')
if not cmap:
if data_type == 'continuous':
cmap = CMAP_CONTINUOUS
elif data_type == 'categorical':
cmap = CMAP_CATEGORICAL
elif data_type == 'binary':
cmap = CMAP_BINARY
else:
raise ValueError(
'Target data type must be continuous, categorical, or binary.')
heatmap(
df,
vmin=vmin,
vmax=vmax,
cmap=cmap,
center=center,
robust=robust,
annot=annot,
fmt=fmt,
annot_kws=annot_kws,
linewidths=linewidths,
linecolor=linecolor,
cbar=cbar,
cbar_kws=cbar_kws,
cbar_ax=cbar_ax,
square=square,
ax=ax_center,
xticklabels=xticklabels,
yticklabels=yticklabels,
mask=mask,
**kwargs)
# Get values for making legend
values = unique(df.values)
values = values[~isnull(values)]
if data_type == 'continuous': # Plot colorbar
# Get not-nan values for computing min, mean, & max
min_ = values.min()
mean_ = values.mean()
max_ = values.max()
cax, kw = make_axes(
ax_bottom,
location='bottom',
fraction=0.16,
cmap=cmap,
norm=Normalize(min_, max_),
ticks=[min_, mean_, max_])
ColorbarBase(cax, **kw)
decorate(ax=cax, xtick_rotation=90)
elif data_type in ('categorical', 'binary'): # Plot category legends
if len(values) < 30:
horizontal_span = ax_center.axis()[1]
vertical_span = ax_center.axis()[3]
colors = assign_colors_to_states(values, colors=cmap)
columns = df.columns.tolist()
if isinstance(columns[0], str):
max_len_c = max([len(c) for c in columns])
else:
max_len_c = 10
vertical_offset = 0.016 * max_len_c
for i, v in enumerate(values):
x = (horizontal_span / len(values) / 2) + \
i * horizontal_span / len(values)
y = 0 - vertical_span * vertical_offset
c = colors[v]
ax_center.plot(
x, y, 'o', color=c, markersize=16, aa=True, clip_on=False)
ax_center.text(
x,
y - vertical_span * 0.05,
v,
horizontalalignment='center',
**FONT_STANDARD)
decorate(
title=title,
xlabel=xlabel,
ylabel=ylabel,
xlabel_rotation=xlabel_rotation,
ylabel_rotation=ylabel_rotation,
xtick_rotation=xtick_rotation,
ytick_rotation=ytick_rotation,
ax=ax_center)
if len(row_annotation):
if len(set(row_annotation)) <= 2:
cmap = CMAP_BINARY
else:
if len(annotation_colors):
cmap = ListedColormap(annotation_colors)
else:
cmap = CMAP_CATEGORICAL
heatmap(
DataFrame(row_annotation),
ax=ax_right,
cbar=False,
xticklabels=False,
yticklabels=False,
cmap=cmap)
if len(column_annotation):
if len(set(column_annotation)) <= 2:
cmap = CMAP_BINARY
else:
if len(annotation_colors):
cmap = ListedColormap(annotation_colors)
else:
cmap = CMAP_CATEGORICAL
heatmap(
| DataFrame(column_annotation) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:16:48 2018
@author: cenv0574
"""
import os
import pandas as pd
import numpy as np
import atra.utils
from ras_method import ras_method
import subprocess
import warnings
warnings.filterwarnings('ignore')
data_path= atra.utils.load_config()['paths']['data']
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
def est_trade_value(x,output_new,sector):
    if (sector != 'other1') and (sector != 'other2'):
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
else:
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'VA'].reset_index()
x['gdp'] = x.gdp*min(sec_output.loc[sec_output.region==x.reg1].values[0][2],sec_output.loc[sec_output.region==x.reg2].values[0][2])
# x['gdp'] = x.gdp*(sec_output.loc[sec_output.region==x.reg1].values[0][2])
return x
def indind_iotable(sup_table,use_table,sectors):
# GET VARIABLES
x = np.array(sup_table.sum(axis=0)) # total production on industry level
g = np.array(sup_table.sum(axis=1)) # total production on product level
F = use_table.iloc[:16,16:].sum(axis=1)
#Numpify
Sup_array = np.asarray(sup_table.iloc[:len(sectors),:len(sectors)]) # numpy array if supply matrix
Use_array = np.asarray(use_table.iloc[:len(sectors),:len(sectors)]) # numpy array of use matrix
    g_diag_inv = np.linalg.inv(np.diag(g)) # inverse of g (diagonalized)
    x_diag_inv = np.linalg.inv(np.diag(x)) # inverse of x (diagonalized)
# Calculate the matrices
B = np.dot(Use_array,x_diag_inv) # B matrix (U*x^-1)
D = np.dot(Sup_array.T,g_diag_inv) # D matrix (V*g^-1)
I_i = np.identity((len(x))) # Identity matrix for industry-to-industry
# Inverse for industry-to-industry
A_ii = np.dot(D,B)
F_ii = np.dot(D,F)/1e6
IDB_inv = np.linalg.inv((I_i-np.dot(D,B))) # (I-DB)^-1
    # And calculate total output by industry
ind = np.dot(IDB_inv,np.dot(D,F)/1e6) # (I-DB)^-1 * DF
IO = pd.concat([pd.DataFrame(np.dot(A_ii,np.diag(ind))),pd.DataFrame(F_ii)],axis=1)
IO.columns = list(use_table.columns[:17])
IO.index = list(use_table.columns[:16])
VA = np.array(list(ind)+[0])-np.array(IO.sum(axis=0))
VA[-1] = 0
IO.loc['ValueA'] = VA
return IO,VA
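# Conceptual sketch (not called anywhere) of the algebra used in indind_iotable, shown on
# a 2-sector toy economy with invented numbers (the function itself is hard-wired to the
# 16-sector tables above): B = U x^-1, D = V^T g^-1, A = DB, and the inverse (I - DB)^-1.
def _example_indind_algebra():
    V = np.array([[80.0, 5.0], [10.0, 60.0]])    # toy supply (make) matrix, products x industries
    U = np.array([[20.0, 10.0], [5.0, 15.0]])    # toy intermediate use matrix, products x industries
    x = V.sum(axis=0)                            # total production by industry
    g = V.sum(axis=1)                            # total production by product
    B = np.dot(U, np.linalg.inv(np.diag(x)))     # use coefficients (U * x^-1)
    D = np.dot(V.T, np.linalg.inv(np.diag(g)))   # market shares (V^T * g^-1)
    A_ii = np.dot(D, B)                          # industry-by-industry coefficients
    leontief_inv = np.linalg.inv(np.identity(2) - A_ii)   # (I - DB)^-1
    return A_ii, leontief_inv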
# =============================================================================
# # Load mapper functions to aggregate tables
# =============================================================================
ind_mapper = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='ind_mapper',header=None)
ind_mapper = dict(zip(ind_mapper[0],ind_mapper[1]))
com_mapper = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='com_mapper',header=None)
com_mapper = dict(zip(com_mapper[0],['P_'+x for x in com_mapper[1]]))
reg_mapper = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='reg_mapper',header=None)
reg_mapper = dict(zip(reg_mapper[0], reg_mapper[1]))
sectors = [chr(i) for i in range(ord('A'),ord('P')+1)]
# =============================================================================
# Load supply table and aggregate
# =============================================================================
sup_table_in = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='Mat Oferta pb',skiprows=2,header=[0,1],index_col=[0,1],nrows=271)
sup_table_in = sup_table_in.drop('Total',level=0,axis=1)
sup_table = sup_table_in.copy()
sup_table.columns = sup_table.columns.get_level_values(0)
sup_table.columns = sup_table.columns.map(ind_mapper)
sup_table = sup_table.T.groupby(level=0,axis=0).sum()
sup_table.columns = sup_table.columns.get_level_values(0)
sup_table.columns = sup_table.columns.map(com_mapper)
sup_table = sup_table.T.groupby(level=0,axis=0).sum()
# =============================================================================
# Load use table and aggregate
# =============================================================================
use_table = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','sh_cou_06_16.xls'),
sheet_name='Mat Utilizacion pc',skiprows=2,header=[0,1],index_col=[0,1],nrows=271)
basic_prod_prices = use_table[['IMPORTACIONES (CIF a nivel de producto y FOB a nivel total)',
'AJUSTE CIF/FOB DE LAS IMPORTACIONES','DERECHOS DE IMPORTACION',
'IMPUESTOS A LOS PRODUCTOS NETOS DE SUBSIDIOS','MARGENES DE COMERCIO',
'MARGENES DE TRANSPORTE','IMPUESTO AL VALOR AGREGADO NO DEDUCIBLE',
]]*-1
use_table = use_table.drop(['PRODUCCION NACIONAL A PRECIOS BASICOS',
'IMPORTACIONES (CIF a nivel de producto y FOB a nivel total)',
'AJUSTE CIF/FOB DE LAS IMPORTACIONES','DERECHOS DE IMPORTACION',
'IMPUESTOS A LOS PRODUCTOS NETOS DE SUBSIDIOS','MARGENES DE COMERCIO',
'MARGENES DE TRANSPORTE','IMPUESTO AL VALOR AGREGADO NO DEDUCIBLE',
'OFERTA TOTAL A PRECIOS DE COMPRADOR','UTILIZACION INTERMEDIA',
'UTILIZACION FINAL','DEMANDA TOTAL'],level=0,axis=1)
basic_prod_prices.columns = basic_prod_prices.columns.get_level_values(0)
basic_prod_prices = basic_prod_prices.T.groupby(level=0,axis=0).sum()
basic_prod_prices.columns = basic_prod_prices.columns.get_level_values(0)
basic_prod_prices.columns = basic_prod_prices.columns.map(com_mapper)
basic_prod_prices = basic_prod_prices.T.groupby(level=0,axis=0).sum()
basic_prod_prices = basic_prod_prices.astype(int)
use_table.columns = use_table.columns.get_level_values(0)
use_table.columns = use_table.columns.map(ind_mapper)
use_table = use_table.T.groupby(level=0,axis=0).sum()
use_table.columns = use_table.columns.get_level_values(0)
use_table.columns = use_table.columns.map(com_mapper)
use_table = use_table.T.groupby(level=0,axis=0).sum()
use_table= pd.concat([use_table,basic_prod_prices],axis=1)
# =============================================================================
# Create IO table and translate to 2016 values
# =============================================================================
IO_ARG,VA = indind_iotable(sup_table,use_table,sectors)
va_new = [498.319,21.986,264.674,1113.747,123.094,315.363,1076.121,168.899,441.293,321.376,750.356,647.929,448.372,426.642,235.624,58.837]
u = ((((np.array(IO_ARG.sum(axis=0)))/VA)[:16])*va_new)
new_fd = (np.array(IO_ARG.iloc[:,16]/(np.array(IO_ARG.sum(axis=0))))*np.array(list(u)+[0]))
new_IO = ras_method(np.array(IO_ARG)[:16,:17],np.array((u)),np.array(list(u-np.array(va_new))+[sum(va_new)]), eps=1e-5)
NEW_IO = pd.DataFrame(new_IO,columns=sectors+['FD'],index=sectors)
NEW_IO.loc['ValueA'] = np.array(list(va_new)+[0])
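# Note: ras_method (defined elsewhere in this codebase) performs RAS / biproportional balancing:
# it iteratively rescales the rows and columns of a seed matrix until the row and column sums
# match prescribed totals. A minimal, hypothetical sketch of the idea (not the exact signature
# or implementation used above):
#
#     def ras_balance(M, row_totals, col_totals, eps=1e-5, max_iter=1000):
#         M = np.array(M, dtype=float)
#         for _ in range(max_iter):
#             M = M * (row_totals / M.sum(axis=1))[:, None]   # match row sums
#             M = M * (col_totals / M.sum(axis=0))[None, :]   # match column sums
#             if (abs(M.sum(axis=1) - row_totals).max() < eps and
#                     abs(M.sum(axis=0) - col_totals).max() < eps):
#                 break
#         return M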
# =============================================================================
# Save 2016 table and the indices to prepare disaggregation
# =============================================================================
NEW_IO.to_csv(os.path.join(data_path,'mrio_analysis','basetable.csv'),index=False,header=False)
pd.DataFrame([len(sectors+['other1'])*['ARG'],sectors+['other']]).T.to_csv(os.path.join(data_path,'mrio_analysis','indices.csv'),index=False,header=False)
''' First iteration, no trade to determine total regional input and output '''
# =============================================================================
# Load provincial data
# =============================================================================
prov_data = pd.read_excel(os.path.join(data_path,'economic_IO_tables','input','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
# =============================================================================
# Create proxy data for first iteration
# =============================================================================
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join(data_path,'mrio_analysis','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
    if (sector != 'other1') and (sector != 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join(data_path,'mrio_analysis','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join(data_path,'mrio_analysis','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
    if (sector != 'other1') and (sector != 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join(data_path,'mrio_analysis','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join(data_path,'mrio_analysis','proxy_trade_{}.csv'.format(sector)),index=False)
# =============================================================================
# Create first version of MRIO for Argentina
# =============================================================================
p = subprocess.Popen(['mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join(data_path, 'mrio_analysis'))
p.wait()
region_names_list = [item for sublist in [[x]*(len(sectors)+1) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA']])*len(region_names)
cols = ([x for x in sectors+['FD']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join(data_path,'mrio_analysis','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
    [region_col, sector_only+col_only], names=('region', 'col'))
import os
import pandas as pd
from scipy.spatial import distance
from tqdm import tqdm
import argparse
parser = argparse.ArgumentParser(description='Set W_PATH to the directory that contains the coordinate data extracted from the pdb .cif files')
parser.add_argument('-o', '--output_directory', help='Name of the output directory (required)', required=True)
args = parser.parse_args()
W_PATH = args.output_directory
cif_names = []
filenames = []
filenames_2 = []
class CreateDistanceMapsAdjacencyMatrix:
def __init__(self, walk_path, cif_names, filenames, filenames_2):
self.walk_path = walk_path
self.cif_names = cif_names
self.filenames = filenames
self.filenames_2 = filenames_2
def prepare_filenames(self, *args):
print("Preparing file name lists for later use...")
for Count, Item in enumerate(args):
if Item == 'cif_names':
for root, dirs, files in os.walk('./' + self.walk_path, topdown=False):
for name in files:
cif_names.append(name)
elif Item == 'filenames':
for root, dirs, files in os.walk('./' + self.walk_path, topdown=False):
for name in files:
if '.csv' in name:
filenames.append(name)
elif Item == 'filenames_2':
for root, dirs, files in os.walk('./' + self.walk_path, topdown=False):
for name in files:
if '2_' in name:
filenames_2.append(name)
print("Preparation complete")
return self.cif_names, self.filenames, self.filenames_2
def extract_critical_information(self, cif_names):
"""Converts .cif to .csv with only essential information"""
print("Extract coordinate information from pdb.cif files, saving as pdb.csv...")
for name in self.cif_names:
with open('./' + self.walk_path + '/' + name) as infile:
target_list = infile.read().split('\n')
df = pd.DataFrame(data=target_list, columns=["header"]) # Put list in a dataframe m X 1 column
# df = df[:-1] # Delete last row of the dataframe which has been added in line above
df_2 = df.header.str.split(expand=True) # Put dataframe to m x 20 columns
assert df.shape[0] == df_2.shape[0]
df_3 = df_2.drop(columns=[0, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 19, 20], axis=1) # Remove non essential columns
assert df_2.shape[0] == df_3.shape[0]
df_3.to_csv('./' + self.walk_path + '/' + name.split('.')[0] + '.csv', encoding='utf-8', index=False, header=False)
print("Extraction complete")
def calculate_distance_maps(self, filenames):
"""Creates the distance maps"""
print("Preparing distance maps...")
for name in self.filenames:
read_csv_df = pd.read_csv('./' + self.walk_path + '/' + name, header=None)
# read_csv_less_df = read_csv_df[:-1] # remove last row of the dataframe
remove_columns_df = read_csv_df.drop(columns=[0, 1], axis=1)
assert read_csv_df.shape[0] == remove_columns_df.shape[0]
convert_to_array = remove_columns_df.to_numpy()
calculate_distances = distance.pdist(convert_to_array, 'euclidean')
make_square = distance.squareform(calculate_distances)
to_df_for_saving = pd.DataFrame(make_square)
assert remove_columns_df.shape[0] == to_df_for_saving.shape[0]
to_df_for_saving.to_csv('./' + self.walk_path + '/2_' + name, encoding='utf-8', index=False, header=False)
print("Distance maps completed")
def join_datasets(self, filenames, filenames_2):
"""Combines the datasets"""
print("Join datasets")
for name in tqdm(self.filenames):
for name_2 in self.filenames_2:
df_1 = pd.read_csv('./' + self.walk_path + '/' + name_2, header=None)
df_2 = pd.read_csv('./' + self.walk_path + '/' + name, header=None)
assert df_1.shape[0] == df_2.shape[0]
                df_join = pd.concat([df_2, df_1], axis=1, join='inner')
import seaborn as sns
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
with open('./train.en') as f:
en = [s.strip().split(' ') for s in f]
en_len = np.array(list(map(len, en)))
with open('./train.jp') as f:
jp = [s.strip().split(' ') for s in f]
jp_len = np.array(list(map(len, jp)))
len_df = pd.DataFrame({'en_len': en_len, 'jp_len': jp_len})
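# Illustrative follow-up (not part of the original snippet): with both length arrays available,
# scipy's pearsonr gives the correlation between English and Japanese sentence lengths, and
# seaborn can plot the joint distribution:
#
#     r, p_value = pearsonr(en_len, jp_len)
#     print('Pearson r = {:.3f} (p = {:.3g})'.format(r, p_value))
#     sns.jointplot(x='en_len', y='jp_len', data=len_df, kind='hex')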
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 17 13:52:10 2018
@author: i
"""
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import datetime
from time import strftime
import os
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as md
from matplotlib.ticker import MaxNLocator
import matplotlib.offsetbox as offsetbox
# options for drop down menus in dialog window
hours_months = [1,2,3,4,5,6,7,8,9,10,11,12]
am_pm = ["am","pm"]
days = [1,2,3,4,5,6,7,8,9,10,
11,12,13,14,15,16,17,18,19,20,
21,22,23,24,25,26,27,28,29,30,31]
years = [2014,2015,2016,2017,2018,2019,2020,2021,2022,2023,2024]
hours24 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]
# function to get night intervals and day intervals from a single dataframe
# returns two lists of start end tuples, one for nights and one for days
def get_intervals(mouse_df, lights_on, lights_out):
# get timestamps from mouse df
dates_range = mouse_df.index
#create distinct, consecutive dates
unique_dates = pd.date_range(mouse_df.index.min().date(),
mouse_df.index.max().date(),
freq='D')
# mouse day and night intervals
night_intervals = []
day_intervals = []
# create night intervals
# for each date in mouse, create start_hour-end_hour pair
# of night interval
for j in range(len(unique_dates)):
# start interval
start_night = datetime.datetime(unique_dates[j].year,
unique_dates[j].month,
unique_dates[j].day,
hour=lights_out,
minute=0,
second=0)
end_night_before = datetime.datetime(unique_dates[j].year,
unique_dates[j].month,
unique_dates[j].day,
hour=lights_on,
minute=0,
second=0)
        # make sure it is not the last interval
if (j+1) < len(unique_dates):
# end interval
end_night_next = datetime.datetime(unique_dates[j+1].year,
unique_dates[j+1].month,
unique_dates[j+1].day,
hour=lights_on,
minute=0,
second=0)
else: # if it is last interval
if start_night < dates_range[-1]:
night_intervals.append((start_night, dates_range[-1]))
break
if j == 0: # for the first interval
if end_night_before > dates_range[0]:
temp0 = dates_range[0]
temp1 = end_night_before
night_intervals.append((temp0,temp1))
                # next night interval starts on the same date
temp0 = start_night
temp1 = end_night_next if end_night_next <= dates_range[-1] else dates_range[-1]
night_intervals.append((temp0,temp1))
else:
temp0 = start_night
# if the next date is in the list,
# set it to the end of nighttime,
# if not set the end of plot to be the end of nighttime
temp1 = end_night_next if end_night_next <= dates_range[-1] else dates_range[-1]
night_intervals.append((temp0,temp1))
else: # not the first day
temp0 = start_night
temp1 = end_night_next if end_night_next <= dates_range[-1] else dates_range[-1]
night_intervals.append((temp0,temp1))
# invert night intervals to get days
for j in range(len(night_intervals)):
start_night, end_night = night_intervals[j]
# if it is the first interval
if j==0:
# if night starts later than the start of the timestamps
if start_night > dates_range[0]:
start_day = dates_range[0]
end_day = start_night
day_intervals.append((start_day,end_day))
else:
# check if this is not the only interval
if j+1 < len(night_intervals):
start_day = end_night
end_day = night_intervals[j+1][0]
day_intervals.append((start_day,end_day))
else: # if it was the only interval
if end_night < dates_range[-1]:
start_day = end_night
end_day = dates_range[-1]
day_intervals.append((start_day,end_day))
# check if it was the last interval
elif j+1 == len(night_intervals):
if len(day_intervals) > 1:
if end_night < dates_range[-1]:
start_day = end_night
end_day = dates_range[-1]
day_intervals.append((start_day,end_day))
else:
start_day = end_night
end_day = night_intervals[j+1][0]
day_intervals.append((start_day,end_day))
return night_intervals, day_intervals
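# Illustrative use of get_intervals (hypothetical dataframe): for a timestamp-indexed mouse
# dataframe and a 6 am / 6 pm light cycle (lights_on=6, lights_out=18 in 24-hour notation),
# it returns two lists of (start, end) timestamp tuples:
#
#     nights, days = get_intervals(mouse_df, lights_on=6, lights_out=18)
#     for start, end in nights:
#         print("night block:", start, "->", end)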
class FedApp(Toplevel):
def __init__(self, parent, title = None):
Toplevel.__init__(self, parent)
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.retrieved_id_ints = None
# instantiate window with options and make it in focus
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
# instantaite buttons ok and cancel
self.buttonbox()
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.initial_focus.focus_set()
self.wait_window(self)
def body(self, master):
# dialog body. return widget with options that should have
# initial focus.
Label(master, text="Select folder with csv files:").grid(row=0,
columnspan=4,
sticky=W,
padx=5,
pady=15)
Label(master, text="Lights on at:").grid(row=2, column=0, sticky=W, padx=5, pady=5)
Label(master, text="Lights out at:").grid(row=3, sticky=W, padx=5, pady=5)
Label(master, text="Select your dates and hours (mm/dd/yyyy h):").grid(row=4,
columnspan=3,
sticky=W,
padx=5,
pady=20)
Label(master, text="Month").grid(row=5, column=1, padx=5, pady=5)
Label(master, text="Day").grid(row=5, column=3, padx=5, pady=5)
Label(master, text="Year").grid(row=5, column=5, padx=5, pady=5)
Label(master, text="Hour").grid(row=5, column=6, padx=5, pady=5)
Label(master, text="From:").grid(row=6, column=0, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=6, column=2, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=6, column=4, sticky=W, padx=5, pady=5)
Label(master, text="Until:").grid(row=7, column=0, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=7, column=2, sticky=W, padx=5, pady=5)
Label(master, text="/").grid(row=7, column=4, sticky=W, padx=5, pady=5)
Label(master, text="*Includes both above dates!", fg="red").grid(row=8, columnspan=3, sticky=W, padx=5, pady=5)
Label(master, text="Bin size for histograms:\n(in minutes)").grid(row=9, sticky=W, padx=5, pady=5)
self.folder_value = StringVar()
self.lights_on_default = IntVar(value=6)
self.lights_out_default = IntVar(value=6)
self.lights_on_am_pm_default = StringVar(value="am")
self.lights_out_am_pm_default = StringVar(value="pm")
self.month_from_default = IntVar(value=3)
self.day_from_default = IntVar(value=8)
self.year_from_default = IntVar(value=2018)
self.hour_from_default = IntVar(value=18)
self.month_until_default = IntVar(value=3)
self.day_until_default = IntVar(value=10)
self.year_until_default = IntVar(value=2018)
self.hour_until_default = IntVar(value=18)
self.bin_size_default = IntVar(value=60)
self.select_folder_btn = Button(master, text="Select", command=self.show_folders)
self.select_folder_btn.grid(row=1,column=5, padx=5, pady=5)
self.folder_path = Entry(master, textvariable=self.folder_value, width=60)
self.lights_on = OptionMenu(master, self.lights_on_default, *hours_months)
self.lights_on['bg'] = "#FFF994"
self.lights_on_am_pm = OptionMenu(master, self.lights_on_am_pm_default, *am_pm)
self.lights_out = OptionMenu(master, self.lights_out_default, *hours_months)
self.lights_out['bg'] ="#689CEB"
self.lights_out_am_pm = OptionMenu(master, self.lights_out_am_pm_default, *am_pm)
self.month_from = OptionMenu(master, self.month_from_default, *hours_months)
self.day_from = OptionMenu(master, self.day_from_default, *days)
self.year_from = OptionMenu(master, self.year_from_default, *years)
self.hour_from = OptionMenu(master, self.hour_from_default, *hours24)
self.month_until = OptionMenu(master, self.month_until_default, *hours_months)
self.day_until = OptionMenu(master, self.day_until_default, *days)
self.year_until = OptionMenu(master, self.year_until_default, *years)
self.hour_until = OptionMenu(master, self.hour_until_default, *hours24)
self.bin_size = Entry(master, textvariable=self.bin_size_default)
self.folder_path.grid(row=1, columnspan=5, sticky=W, padx=5, pady=15)
self.lights_on.grid(row=2, column=1, padx=5, pady=5)
self.lights_out.grid(row=3, column=1, padx=5, pady=5)
self.lights_on_am_pm.grid(row=2, column=2, columnspan=4, sticky=W, padx=5, pady=5)
self.lights_out_am_pm.grid(row=3, column=2, columnspan=4, sticky=W, padx=5, pady=5)
self.month_from.grid(row=6,column=1, padx=5, pady=5)
self.day_from.grid(row=6, column=3, padx=5, pady=5)
self.year_from.grid(row=6,column=5, padx=5, pady=5)
self.hour_from.grid(row=6, column=6, padx=5, pady=5)
self.month_until.grid(row=7,column=1, padx=5, pady=5)
self.day_until.grid(row=7, column=3, padx=5, pady=5)
self.year_until.grid(row=7,column=5, padx=5, pady=5)
self.hour_until.grid(row=7, column=6, padx=5, pady=5)
self.bin_size.grid(row=9, column=1, columnspan=2, sticky=W, padx=5, pady=5)
self.plot_checkbox = IntVar(value=1)
self.cb = Checkbutton(master,
text="Save Plots",
variable=self.plot_checkbox)
self.cb.grid(row=11, column=0, columnspan=3, sticky=E, padx=5, pady=5)
self.data_checkbox = IntVar(value=1)
self.cb = Checkbutton(master,
text="Save Data",
variable=self.data_checkbox)
self.cb.grid(row=11, column=3, columnspan=3, sticky=W, padx=5, pady=5)
def buttonbox(self):
# add standard button box (ok and cancel)
box = Frame(self)
w = Button(box, text="Cancel", width=10, command=self.cancel)
w.pack(side=LEFT, padx=5, pady=15)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=15)
# same commands with keyboard (enter==ok, esc==cancel)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.cancel)
box.pack()
#########################################
# This is where all the magic is called #
#########################################
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
# retrieve user input
self.get_input()
# close options window
self.cancel()
# execute main functionality of the script
self.main_function()
        print()
print("\nDone")
try:
self.parent.destroy()
except:
return
#########################################
def cancel(self, event=None):
# put focus back to the parent window
self.parent.focus_set()
self.destroy()
def validate(self):
# validate if path was given
# if day and night last 12 hours
# if bin size is integer no larger than 12 hours
# if given dates are chronological
# check if day duration of day and night is at least an hour
if self.lights_on_default.get() == 12:
if self.lights_on_am_pm_default.get()=="am":
self.my_lights_on = 0
elif self.lights_on_am_pm_default.get()=="pm":
self.my_lights_on = self.lights_on_default.get()
elif self.lights_on_am_pm_default.get()=="am":
self.my_lights_on = self.lights_on_default.get()
elif self.lights_on_am_pm_default.get()=="pm":
self.my_lights_on = self.lights_on_default.get()+12
if self.lights_out_default.get() == 12:
if self.lights_out_am_pm_default.get()=="am":
self.my_lights_out = 0
elif self.lights_out_am_pm_default.get()=="pm":
self.my_lights_out = self.lights_out_default.get()
elif self.lights_out_am_pm_default.get()=="am":
self.my_lights_out = self.lights_out_default.get()
elif self.lights_out_am_pm_default.get()=="pm":
self.my_lights_out = self.lights_out_default.get()+12
if abs(self.my_lights_on - self.my_lights_out) != 12:
messagebox.showwarning(
"Warning!",
"Day and Night should last 12 hours each!"
)
return 0
try:
# check in path was provided
if len(self.folder_path.get()) > 0:
# test if bin is integer
int(self.bin_size.get())
else:
messagebox.showwarning(
"Warning!",
"Remember to select the path.\nBin has to be an integer.\n\nPlease try again."
)
return 0
# check range of bin size (no bigger than 12 hours)
if int(self.bin_size.get()) <= 0 or int(self.bin_size.get()) > 720:
messagebox.showwarning(
"Warning!",
"Bin size has to be smaller than 12 hours (720 minutes)!"
)
return 0
# check if from date is earlier than until date
date_from_date = datetime.datetime(self.year_from_default.get(),
self.month_from_default.get(),
self.day_from_default.get(),
hour=self.hour_from_default.get(),
minute=0,second=0)
date_until_date = datetime.datetime(self.year_until_default.get(),
self.month_until_default.get(),
self.day_until_default.get(),
hour=self.hour_until_default.get(),
minute=0,second=0)
if date_from_date < date_until_date:
return 1
else:
messagebox.showwarning(
"Warning!",
"From date has to be before Until date!"
)
return 0
except ValueError:
messagebox.showwarning(
"Warning!",
"Remember to select the path.\nBin has to be an integer.\n\nPlease try again."
)
return 0
def get_input(self):
# executed after clicking on ok button
self.main_folder_path = self.folder_path.get()
date_from_str = (str(self.year_from_default.get()) + "-" +
str(self.month_from_default.get()) + "-" +
str(self.day_from_default.get()) + " " +
str(self.hour_from_default.get()) + ":00:00")
date_until_str = (str(self.year_until_default.get()) + "-" +
str(self.month_until_default.get()) + "-" +
str(self.day_until_default.get()) + " " +
str(self.hour_until_default.get()) + ":00:00")
self.my_start_date = date_from_str
self.my_end_date = date_until_str
self.my_bin_size = self.bin_size.get()
self.to_plot = False if self.plot_checkbox.get()==0 else True
self.to_save_data = False if self.data_checkbox.get()==0 else True
def show_folders(self):
# executed when select button in dialog box is clicked
# select folder from explorer window
self.src = filedialog.askdirectory()
self.folder_value.set(self.src)
def select_mice(self):
# create a list of available mice ids
self.mice_ids_str_values = ""
for i in range(len(self.mice_ids_list)):
if i+1 == len(self.mice_ids_list): # if the last one
self.mice_ids_str_values = self.mice_ids_str_values + str(self.mice_ids_list[i])
else:
self.mice_ids_str_values = self.mice_ids_str_values + str(self.mice_ids_list[i]) + ","
# create option window
self.option_window = Tk()
self.option_window.title('Mice Selection')
Label(self.option_window, text="Select your mice from the list of available mice:").grid(row=0, column=0, sticky=W, padx=5, pady=5)
Label(self.option_window, text=self.mice_ids_str_values).grid(row=1, column=0, padx=5, pady=5)
self.mice_selection = Entry(self.option_window, textvariable="")
# clear entry just in case, and set the text to mice ids from files
self.mice_selection.delete(0, END)
self.mice_selection.insert(0, self.mice_ids_str_values)
self.mice_selection.grid(row=2, column=0, padx=5, pady=5)
Label(self.option_window, text="*List of coma separated integer ids! No spaces!", fg="red").grid(row=3, column=0, sticky=W, padx=5, pady=5)
b = Button(self.option_window, text='Ok', command=self.get_mice_choice)
b.grid(row=4, column=0, sticky='nsew', padx=20, pady=5)
self.option_window.initial_focus = self.option_window
self.option_window.wait_window(self.option_window)
def get_mice_choice(self):
try:
# remove leading and trailing whitespaces and comas, and split by comas
retrieved_id_strings = self.mice_selection.get().strip().strip(',').split(',')
# check if all options are integers
self.retrieved_id_ints = [int(el) for el in retrieved_id_strings]
# check if all options were available in file
for el in self.retrieved_id_ints:
if str(el) not in self.mice_ids_str_values.split(','):
messagebox.showwarning(
"Warning!",
"Some of the given ids might not be available in the files."
)
self.option_window.destroy()
# reset ids to none
self.retrieved_id_ints = None
return
except:
messagebox.showwarning(
"Warning!",
"List of coma separated integer ids!\nNo spaces!\n"
)
self.option_window.destroy()
# reset ids to none
self.retrieved_id_ints = None
return
self.option_window.destroy()
#########################################################################
# My sequence of actions (reading, binning, plotting)
def main_function(self):
csv_read = self.read_csv_files()
# if the csv_read function's checks failed
if csv_read == 0:
return 0
# read which mice to include
self.select_mice()
# if selected mice were not correctly validated, end here
if self.retrieved_id_ints is None:
print("Failed to select mice")
return
# create a new list of dataframes only with the selected mice
self.include_selected_mice()
# retrieve totals for each day, each day/night
# and interval times between pellet intakes
self.get_data()
# create path to subfolder for results
# MonthDay_HourMinute + the above ending + .xls
current_time = strftime("%m%d_%H%M_%S")+"s"
subfolder_name = "Results"+current_time
self.subfolder_path = os.path.join(self.main_folder_path, subfolder_name)
# if folder not yet created, create one
if (not os.path.exists(self.subfolder_path)):
os.mkdir(self.subfolder_path)
# plot binned pellets and motorturns and intervlas (one plot per mouse)
self.plot_pellets_and_motorturns()
if (self.to_plot):
# plot histograms
self.plot_histograms()
self.plot_kcal()
if (self.to_save_data):
# save day data and day/night data
self.save_data()
#############################################################################
#####################################################
# FUNCTION TO READ AND SORT EACH MOUSE DATA,
# AND TO GET ALL MICE IDS
def read_csv_files(self):
# reads csv files and organizes them into dataframes
all_dataframes = []
# for all files in folder
for file in os.listdir(self.main_folder_path):
if file.endswith(".CSV"):
if file.startswith("FED"):
# read that file into a dataframe
file_path = os.path.join(self.main_folder_path ,file)
df = pd.read_csv(file_path)
all_dataframes.append(df)
##################################################
# create a single dataframe from all files
self.main_df = pd.concat(all_dataframes)
print(self.main_df)
##################################################
# create separate dataframe for each mouse
# (all original columns)
by_mouse_df_list = []
# find unique mouse indexes
mice_indexes = pd.unique(self.main_df[' Mouse'])
# split main dataframe into single dataframe per mouse
for index in mice_indexes:
single_mouse_df = self.main_df[self.main_df[' Mouse']==index]
by_mouse_df_list.append(single_mouse_df)
########################################################################
# list of dataframes by mouse (only given dates)
# (only sorted timestamps, mouse index, pellet count, motorturn count)
self.mouse_df_list = []
########################################################################
# make sure all dates are sorted:
for i in range(len(by_mouse_df_list)):
# count how many rows are there
# that is equal to the total pellet count
total_pellet_count = by_mouse_df_list[i].shape[0]
# create consecutive pellet count values
total_pellet_count_list = [i+1 for i in range(total_pellet_count)]
# convert dates to pandas datetime
ts_list = pd.to_datetime(by_mouse_df_list[i]['MM:DD:YYYY hh:mm:ss']).tolist()
# create new dataframe
new_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :ts_list,
"Mouse" : by_mouse_df_list[i][' Mouse'].tolist(),
"PelletCount" : total_pellet_count_list,
"MotorTurns" : by_mouse_df_list[i][' MotorTurns'].tolist()})
# make timestamps indexes
new_df.index = new_df['MM:DD:YYYY hh:mm:ss']
# remove old column
del new_df['MM:DD:YYYY hh:mm:ss']
# sort dates
new_df = new_df.sort_index()
# select only user defined timeframe
# https://pandas.pydata.org/pandas-docs/stable/timeseries.html
new_df = new_df[self.my_start_date:self.my_end_date]
# replace pellet count with new consecutive pellet count for that dates
new_df['PelletCount'] = [i+1 for i in range(new_df.shape[0])]
if new_df.shape[0] != 0:
self.mouse_df_list.append(new_df)
else:
# if for a mouse, there is no data within given dates
# my_start_year,my_start_month,my_start_day = self.my_start_date.split('-')
# my_end_year,my_end_month,my_end_day = self.my_end_date.split('-')
# create dataframe with all zero values
start = datetime.datetime.strptime(self.my_start_date, "%Y-%m-%d %H:%M:%S")
end = datetime.datetime.strptime(self.my_end_date, "%Y-%m-%d %H:%M:%S")
new_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [by_mouse_df_list[i][' Mouse'].iloc[0], by_mouse_df_list[i][' Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
new_df.index = new_df['MM:DD:YYYY hh:mm:ss']
del new_df['MM:DD:YYYY hh:mm:ss']
new_df = new_df.sort_index()
self.mouse_df_list.append(new_df)
# check if there was any data
if len(self.mouse_df_list) == 0:
messagebox.showwarning(
"Warning!",
"No data for given dates!"
)
return 0
# get all mice ids from dataframes
self.mice_ids_list = []
for i in range(len(self.mouse_df_list)):
mouse_id = self.mouse_df_list[i]['Mouse'].iloc[0]
if mouse_id not in self.mice_ids_list:
self.mice_ids_list.append(mouse_id)
return 1
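    # Input format note (based on the columns referenced above): every FED*.CSV file is read with
    # pandas and must provide at least the columns 'MM:DD:YYYY hh:mm:ss', ' Mouse' and
    # ' MotorTurns' (the leading spaces in the last two headers are part of the names).
    # A hypothetical pre-flight check could be:
    #
    #     df = pd.read_csv(file_path)
    #     assert {'MM:DD:YYYY hh:mm:ss', ' Mouse', ' MotorTurns'}.issubset(df.columns)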
def include_selected_mice(self):
included_mice_df = []
for i in range(len(self.mouse_df_list)):
# get mouse id from the dataframe
mouse_id = self.mouse_df_list[i]['Mouse'].iloc[0]
# check if that is was selected by user
if mouse_id in self.retrieved_id_ints:
included_mice_df.append(self.mouse_df_list[i])
# make new list of dataframes only with selected mice a main source of data
self.mouse_df_list = included_mice_df
##################################################################
# FUNCTION TO GET DAY AND NIGHT INTERVALS FOR EACH MOUSE
# TO CALCULATE TOTAL PELLET INTAKE BY 24HRS AND AVERAGE,
# TOTAL PELLET INTAKE DURING DAYTIMES AND NIGHTTIMES AND AVERAGE
# INTERVALS BETWEEN PELLET INTAKES
def get_data(self):
################################################################
# day and night intervals by mouse
self.night_mouse_intervals = []
self.day_mouse_intervals = []
######################################################################
# for each mouse get all night intervals and all day intervals
for i in range(len(self.mouse_df_list)):
# single mouse
night_intervals, day_intervals = get_intervals(self.mouse_df_list[i],
self.my_lights_on,
self.my_lights_out)
# add to the list of intervals for all mice
self.night_mouse_intervals.append(night_intervals)
self.day_mouse_intervals.append(day_intervals)
######## end creating all day intervals for that mouse
######## (self.day_mouse_intervals)
# find first date of all and last date from all
starts = []
ends = []
# for all mice find beginning and end of data
for df in self.mouse_df_list:
# find first date for that mouse and the last date
starts.append(df.index.min())
ends.append(df.index.max())
        # find the earliest date from all mice and the latest date from all mice
earliest_of_all = min(starts)
latest_of_all = max(ends)
# print(earliest_of_all, latest_of_all)
# create the list of start times for all available 24 hour periods
# first, find whether the earliest common date is closer to start day or start night
only_date_earliest = earliest_of_all.date()
that_day = pd.Timestamp(year=only_date_earliest.year,
month=only_date_earliest.month,
day=only_date_earliest.day,
hour=int(self.my_lights_on))
that_night = pd.Timestamp(year=only_date_earliest.year,
month=only_date_earliest.month,
day=only_date_earliest.day,
hour=int(self.my_lights_out))
# decide whether to start from the lights out or lights on hour
if abs(that_day-earliest_of_all) < abs(that_night-earliest_of_all):
my_earliest = that_day
else:
my_earliest = that_night
all_24hrs_dates = pd.date_range(my_earliest,
latest_of_all,
freq='D')
# create a dataframe for each mouse
# that contains data from common start and common end
mouse_per24hrs_full_dfs = []
all_mouse_day_intervals = []
        all_mouse_night_intervals = []
for df in self.mouse_df_list:
new_df = df[earliest_of_all:latest_of_all]
mouse_per24hrs_full_dfs.append(new_df)
# get all night and day intervals for that mouse
night_intervals, day_intervals = get_intervals(new_df,
self.my_lights_on,
self.my_lights_out)
all_mouse_day_intervals.append(day_intervals)
            all_mouse_night_intervals.append(night_intervals)
# for each mouse create list of dataframes that contain timestamps
# for each 24 hour period
mouse_24hrs_dfs = []
for i in range(len(mouse_per24hrs_full_dfs)):
# list of tuples,
# first element is a pair of start-end period,
# second element is dataframe, one df per each 24 hour period
mouse_dfs = []
for j in range(len(all_24hrs_dates)):
# if it is the first beginning
if j == 0:
# check if this is the only beginning
if len(all_24hrs_dates) == 1:
start = all_24hrs_dates[j]
# ends on last available time
end = mouse_per24hrs_full_dfs[i].index.max()
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
else: # this was not the only beginning (not the last)
start = all_24hrs_dates[j]
end = all_24hrs_dates[j+1]
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
# check if it was the last beginning
elif (j+1) == len(all_24hrs_dates):
start = all_24hrs_dates[j]
end = mouse_per24hrs_full_dfs[i].index.max()
# check if the start date is earlier that the end of data
if start < end:
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
else: # not the first and not the last beginning
start = all_24hrs_dates[j]
end = all_24hrs_dates[j+1]
single_period_df = mouse_per24hrs_full_dfs[i][start:end]
if single_period_df.empty:
# if there was no data for that interval create a dummy
single_period_df = pd.DataFrame({"MM:DD:YYYY hh:mm:ss" :[start,end],
"Mouse" : [mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0], mouse_per24hrs_full_dfs[i]['Mouse'].iloc[0]],
"PelletCount" : [0,0],
"MotorTurns" : [0,0]})
single_period_df.index = single_period_df['MM:DD:YYYY hh:mm:ss']
del single_period_df['MM:DD:YYYY hh:mm:ss']
mouse_dfs.append(((start,end),single_period_df))
mouse_24hrs_dfs.append(mouse_dfs)
# print(mouse_dfs)
# create dataframes for the csv files
# for all mice, create dataframe with start dates as indexes
# column names as mice ids with sums from that 24 hours in each row
# last column with means
# divide each day into sums from daytime and nighttime
mice_by_24hrs_cumm_dfs = []
mice_by24h_day_night_dfs = []
for i in range(len(mouse_24hrs_dfs)): # for each mouse
my_sums = []
my_day_sums = []
my_night_sums = []
# print("\nMouse", i+1)
for j in range(len(mouse_24hrs_dfs[i])): # for each day
# get a list of either day or night according to hours from timestamp
day_night_list = []
for row in mouse_24hrs_dfs[i][j][1].itertuples():
if row[0].hour >= self.my_lights_on and row[0].hour < self.my_lights_out:
day_night_list.append('day')
else:
day_night_list.append('night')
# create new dataframe with a column for days and nights
# first element of tuple is pair of dates
# second element is dataframe
day_night_df = mouse_24hrs_dfs[i][j][1].copy()
day_night_df['DayNight'] = day_night_list
# (if sum=0, there were no data)
if day_night_df[day_night_df.DayNight == 'day']['PelletCount'].sum() == 0:
my_day_sum = np.nan
else:
my_day_sum = day_night_df[day_night_df.DayNight == 'day'].shape[0]
if day_night_df[day_night_df.DayNight == 'night']['PelletCount'].sum() == 0:
my_night_sum = np.nan
else:
my_night_sum = day_night_df[day_night_df.DayNight == 'night'].shape[0]
# second element is dataframe
# first sum pellets (if sum=0, there were no data)
if mouse_24hrs_dfs[i][j][1]['PelletCount'].sum() == 0:
my_sum = np.nan
else: # number of rows of data is the sum of all pellets
my_sum = mouse_24hrs_dfs[i][j][1].shape[0]
mouse_name = "Mouse " + str(mouse_24hrs_dfs[i][j][1]['Mouse'].iloc[0])
mouse_name_day = mouse_name + "_Day"
mouse_name_night = mouse_name + "_Night"
my_sums.append(my_sum)
my_day_sums.append(my_day_sum)
my_night_sums.append(my_night_sum)
            df = pd.DataFrame({mouse_name: my_sums})
import streamlit as st
import streamlit.components.v1 as stc
# Text Cleaning Pkgs
import neattext as nt
import neattext.functions as nfx
from collections import Counter
import pandas as pd
# Text Viz Pkgs
from wordcloud import WordCloud
from textblob import TextBlob
# Data Viz Pkgs
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
import altair as alt
from PIL import Image
import os
from pathlib import Path
HTML_BANNER = """
<div style="background-color:#3872fb;padding:10px;border-radius:10px;border-style:ridge;">
<h1 style="color:white;text-align:center;">Text Analysis NLP App </h1>
</div>
"""
def get_most_common_tokens(docx,num=10):
word_freq = Counter(docx.split())
most_common_tokens = word_freq.most_common(num)
return dict(most_common_tokens)
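# Illustrative call (hypothetical text): the helper above returns a dict of the num most
# frequent whitespace-separated tokens.
#
#     get_most_common_tokens("to be or not to be", num=2)
#     # -> {'to': 2, 'be': 2}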
def plot_most_common_tokens(docx,num=10):
word_freq = Counter(docx.split())
most_common_tokens = word_freq.most_common(num)
x,y = zip(*most_common_tokens)
fig = plt.figure(figsize=(20,10))
plt.bar(x,y)
plt.title("Most Common Tokens")
plt.xticks(rotation=45)
plt.show()
st.pyplot(fig)
def plot_wordcloud(docx):
mywordcloud = WordCloud().generate(docx)
fig = plt.figure(figsize=(20,10))
plt.imshow(mywordcloud,interpolation='bilinear')
plt.axis('off')
st.pyplot(fig)
def plot_mendelhall_curve(docx):
word_length = [ len(token) for token in docx.split()]
word_length_count = Counter(word_length)
sorted_word_length_count = sorted(dict(word_length_count).items())
x,y = zip(*sorted_word_length_count)
fig = plt.figure(figsize=(20,10))
plt.plot(x,y)
plt.title("Plot of Word Length Distribution")
plt.show()
st.pyplot(fig)
def plot_mendelhall_curve_2(docx):
word_length = [ len(token) for token in docx.split()]
word_length_count = Counter(word_length)
sorted_word_length_count = sorted(dict(word_length_count).items())
x,y = zip(*sorted_word_length_count)
    mendelhall_df = pd.DataFrame({'tokens': x, 'counts': y})
import logging
import copy
import yfinance as yf
import pandas as pd
import numpy as np
from pypfopt import black_litterman
from pypfopt.expected_returns import mean_historical_return
from pypfopt.black_litterman import BlackLittermanModel
from pypfopt.risk_models import CovarianceShrinkage
from sklearn.linear_model import LinearRegression
from typing import Dict, List
logging.basicConfig(filename='output.log', filemode='a',
format='%(asctime)s - %(levelname)-4s [%(filename)s:%(lineno)d] %(message)s', level=logging.INFO)
class MarketModels:
def __init__(self, historical_prices: pd.DataFrame, model: str, views_dict: Dict[str, float] = {},
confidences: List[float] = [], mcaps: pd.DataFrame = pd.DataFrame(),
ff_factors_df: pd.DataFrame = pd.DataFrame()) -> None:
self.historical_prices_df = historical_prices # they have to be backfilled
self.tickers = list(historical_prices.columns) # tickers lst
self.model_summary = {} # dictionary containingy the summary
# data validation for views and confidences
assert len(views_dict) == len(confidences), "Views and confidences need to be of the same size"
self.views_dict = views_dict
self.confidences = confidences
self.S = None # covar matrix historical
self.mu = None # mean historical returns
# get the market prices for the sp500 -> main index asset
logging.info(f"Initiating download of the main index: 'sp500'")
self.sp500 = yf.download("SPY", period="max")["Adj Close"]
# bl params
self.delta = None # market implied risk aversion
self.market_prior = None # compute the market priors -> this needs to be done according to a parameter
self.mcaps = mcaps
#self.market_prior = self.market_priors(self.mkt_data_reader.mcaps, self.delta, self.S)
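        # Hedged sketch (assumptions: PyPortfolioOpt's black_litterman helpers and a market-cap
        # Series) of how delta, S and market_prior could later be populated; illustration only,
        # not the implementation used here:
        #
        #     self.S = CovarianceShrinkage(self.historical_prices_df).ledoit_wolf()
        #     self.delta = black_litterman.market_implied_risk_aversion(self.sp500)
        #     self.market_prior = black_litterman.market_implied_prior_returns(
        #         self.mcaps.squeeze(), self.delta, self.S)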
# ff params
self.ff_factors = ff_factors_df # ff factors df (can get them form the dr class)
self.df_stocks_ff = None
self.risk_factors = list(self.ff_factors.columns)
self.er_fama_df = None # df of expected returns of the ff model
self.ff_betas = None # ff-betas dict
self.ff_scores = None # ff-R^2 of the stocks
self.ret_ff = None # annualized expected (mean) normal (not log) returns of the ff-model pd.Series
if model == "bl":
self.prepare_black_litterman(include_ff = False) # call the prepare bl method
elif model == "bl-ff":
self.prepare_black_litterman(include_ff = True) # prepare bl method with fama-french as views
elif model == "vanilla-ff":
self.prepare_ff()
def prepare_ff(self):
logging.info(f"Computing the expected returns and covar matrix given the FF model")
# compute log returns
ln_rt = (np.log(self.historical_prices_df / self.historical_prices_df.shift(1)))[1:] # log returns
ln_rt.index = pd.to_datetime(ln_rt.index, format= '%Y%m%d') # format date
self.df_stocks_ff = ln_rt.merge(self.ff_factors, left_index = True, right_index = True) # join with the ff factors to expand the dataset
ff_factors_cols = list(self.ff_factors.columns) # columns of the FF factors -> Here we could remove/add
betas={}
scores={}
er_fama = pd.DataFrame()
for ticker in self.tickers:
ff_factors_ticker_cols = ff_factors_cols + [ticker]
            ff_factors_ticker_df = pd.DataFrame()
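            # The rest of the per-ticker loop is not shown in this excerpt. A hedged sketch of the
            # usual Fama-French step (assumption: regress each ticker's log returns on the factor
            # columns with the LinearRegression imported above):
            #
            #     X = self.df_stocks_ff[ff_factors_cols]
            #     y = self.df_stocks_ff[ticker]
            #     reg = LinearRegression().fit(X, y)
            #     betas[ticker] = dict(zip(ff_factors_cols, reg.coef_))
            #     scores[ticker] = reg.score(X, y)
            #     er_fama[ticker] = reg.predict(X)   # fitted daily log returns for the ticker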
import json
import os
import shutil
import jsonpickle
import pickle
import pandas as pd
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
# Create your views here.
from django.views import generic
from rest_framework.authentication import BasicAuthentication
from rest_framework.response import Response
from rest_framework.views import APIView
from python_back_end.exceptions import DataHolderException
from python_back_end.triangle_formatting.triangle_pipeline import TrianglePipeline
from python_back_end.triangle_formatting.triangle_utils import InputMatcher
from python_back_end.utilities.sheet_io import SheetWriter
from python_back_end.utilities.state_handling import DataHolder
from python_back_end.utilities.sheet_io import ExcelLoader
from python_back_end.program_settings import PROGRAM_DIRECTORIES as pdir
from python_back_end.program_settings import PROGRAM_STRINGS as ps
from python_back_end.triangle_formatting.triangle_rendering import RowParser
from python_back_end.triangle_formatting.triangle_templater import TriangleTemplater
from rest_framework.authentication import SessionAuthentication
from rest_framework.parsers import FileUploadParser, MultiPartParser
from django.core.files.uploadedfile import TemporaryUploadedFile, InMemoryUploadedFile
import tempfile
import xlrd
class CsrfExemptSessionAuthentication(SessionAuthentication):
def enforce_csrf(self, request):
        return  # Skip the CSRF check that SessionAuthentication would otherwise enforce
class DUMMY_ConnectDataAPIView(APIView):
#Skips CSRF verification
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
with open("test_file_henrik", "rb") as f:
response_data = pickle.load(f)
return Response({'data': response_data})
class ChangeDimensionAPIView(APIView):
#Skips CSRF verification
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
# Need to post - str_data_holder, output triangles (templates)
str_data_holder = request.data.get('str_data_holder')
data_holder = DataHolder.decode(str_data_holder)
response_data = {}
if data_holder is None:
raise ValueError("No data holder found")
elif data_holder.n == 0:
raise ValueError("No sheets in data holder")
        #Receive triangle formats
user_defined_triangles = request.data.get('templates')
try:
#DataHolder manipulation
data_holder, group_ids, sheet_names = RowParser.set_card_ids(user_defined_triangles, data_holder)
user_defined_triangles = InputMatcher.match_triangles_to_output(user_defined_triangles, data_holder)
user_defined_triangles = RowParser.parse_output_from_triangle_forms(user_defined_triangles, data_holder)
except DataHolderException as err:
data = {}
data['message'] = err.message
data['dh'] = err.dh
return Response({'response_error': data})
#SheetWriter.trngs_to_existing_excel(user_defined_triangles, pdir.TEMP_DIR + ps.OUTPUT_NAME + filename)
response_data["group_ids"] = group_ids
response_data['output_triangles'] = user_defined_triangles
response_data["unit_triangles"] = ChangeDimensionAPIView.make_unit_triangle_list(data_holder)
return Response({'data': response_data})
@staticmethod
def make_unit_triangle_list(data_holder):
unit_triangles = []
# Needed fields, .card_id, .roles, orig_sheet_name, .name, .df_data.columns.values, df_data.values
for ds in data_holder:
triangle = {}
triangle["rows"] = ds.df_data.values.tolist()
triangle["headers"] = ds.df_data.columns.values.tolist()
triangle["name"] = ds.name
triangle["orig_sheet_name"] = ds.orig_sheet_name
triangle["roles"] = ds.roles
triangle["card_id"] = ds.card_id
triangle["id"] = ds.id
triangle["fit_for_output"] = ds.fit_for_output
unit_triangles.append(triangle)
return unit_triangles
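    # Shape of each entry returned above (field names taken directly from the loop):
    #
    #     {
    #         "rows": [[...], ...],        # ds.df_data values as nested lists
    #         "headers": [...],            # ds.df_data column names
    #         "name": ..., "orig_sheet_name": ..., "roles": ...,
    #         "card_id": ..., "id": ..., "fit_for_output": ...
    #     }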
class ConnectDataAPIView(APIView):
#Skips CSRF verification
authentication_classes = (CsrfExemptSessionAuthentication, BasicAuthentication)
def post(self, request):
        #Receive name of file
filename = request.data.get('fileName')
# Build data holder
sr_list = jsonpickle.decode(request.data['sr_list'])
selected_sheets = request.data['selected_sheets']
data_holder = DataHolder(filename)
for sr in sr_list:
if sr.sheet_name in selected_sheets:
                data_holder.add_sheet(sr.sheet_name, pd.DataFrame(columns=sr.headers, data=sr.row_vals))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 09:23:51 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
import re
from pprint import pprint
from collections import Counter
from tableone import TableOne
from sdv.evaluation import evaluate
from sdv.metrics.relational import KSTestExtended
from sdv.metrics.tabular import CSTest, KSTest
from sdv.metrics.tabular import BNLikelihood
from sdv.metrics.tabular import LogisticDetection, SVCDetection
from sdv.metrics.tabular import BinaryAdaBoostClassifier
from feature_data_imputation import data_imputation
warnings.filterwarnings("ignore")
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
sys.path.append(os.path.abspath("/Users/rayin/Google Drive/Harvard/5_data/UDN/work/code/"))
#import all the gene available data from 'variant_clean.csv'
case_gene_clean = pd.read_csv('data/processed/variant_clean.csv', index_col=0)
#case_gene_clean['\\12_Candidate variants\\03 Interpretation\\'].replace('pathogenic', 1, inplace=True)
#case_gene_clean['\\12_Candidate variants\\03 Interpretation\\'].replace('less_pathogenic', 0, inplace=True)
label = case_gene_clean['\\12_Candidate variants\\03 Interpretation\\'].reset_index()
label = label[label.columns[1]]
#Extract demographic information from 'case_gene_filter_labeled.csv'
case_gene_filter_labeled = pd.read_csv('data/processed/case_gene_update.csv', index_col=0)
case_demographics = []
for i in range(0, len(case_gene_clean)):
for j in range(0, len(case_gene_filter_labeled)):
if case_gene_clean['\\000_UDN ID\\'].iloc[i] == case_gene_filter_labeled['\\000_UDN ID\\'].iloc[j]:
case_demographics.append(case_gene_filter_labeled.iloc[j])
break
case_demographics = pd.DataFrame(case_demographics)
case_demographics = case_demographics.reset_index()
case_demographics = case_demographics.iloc[:,2:10]
patient_demographics = pd.concat([case_demographics, label], axis=1)
# -*- coding:utf-8 -*-
"""
Siamese architecture + ABCNN
"""
from __future__ import division
import random
import os
import time
import datetime
import copy
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix, roc_curve, auc
from keras.utils import to_categorical
import tensorflow as tf
FLAGS = tf.flags.FLAGS
from tensorflow.contrib import learn
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib import rnn
from nltk.stem import SnowballStemmer
import re
import jieba
from string import punctuation
random.seed(2018)
np.random.seed(2018)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Data loading path
# tf.flags.DEFINE_string("train_data_file", "H:/tb/project0/quora/quora_duplicate_questions.tsv", "train data path.")
# tf.flags.DEFINE_string("model_data_path", "H:/tb/project0/quora/model/", "model path for storing.")
# tf.flags.DEFINE_string("train_data_file", "E:/data/quora-duplicate/train.tsv", "train data path.")
tf.flags.DEFINE_string("train_data_file", "D:/DF/sentence_theme_based_sentiment/data/train.csv", "train data path.")
tf.flags.DEFINE_string("test_data_file", "D:/DF/sentence_theme_based_sentiment/data/test_public.csv", "train data path.")
tf.flags.DEFINE_string("result_file", "D:/DF/sentence_theme_based_sentiment/data/submission_result.csv", "train data path.")
tf.flags.DEFINE_string("dictionary", "./utils/dictionary.txt", "dictionary path.")
tf.flags.DEFINE_string("stoplist", "./utils/stoplist.txt", "stoplist path.")
tf.flags.DEFINE_string("pretrained_word_emb", "./utils/word2vec.txt", "stoplist path.")
tf.flags.DEFINE_string("model_data_path", "D:/DF/sentence_theme_based_sentiment/model/", "model path for storing.")
# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")
# Model Hyperparameters
tf.flags.DEFINE_integer("subject_class", 10, "number of classes (default: 2)")
tf.flags.DEFINE_integer("sentiment_class", 3, "number of classes (default: 2)")
tf.flags.DEFINE_integer("subject_sentiment_class", 30, "number of classes (default: 2)")
tf.flags.DEFINE_float("lr", 0.002, "learning rate (default: 0.002)")
tf.flags.DEFINE_integer("embedding_dim", 300, "Dimensionality of character embedding (default: 128)")
tf.flags.DEFINE_integer("sentence_len", 30, "Maximum length for sentence pair (default: 50)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (default: '3,4,5')")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
tf.flags.DEFINE_float("dropout_keep_prob", 0.3, "Dropout keep probability (default: 0.5)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.2, "L2 regularization lambda (default: 0.0)")
# LSTM Hyperparameters
tf.flags.DEFINE_integer("hidden_dim", 128, "Number of filters per filter size (default: 128)")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 256, "Batch Size (default: 64)")
tf.flags.DEFINE_integer("num_epochs", 30000, "Number of training epochs (default: 200)")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate model on dev set after this many steps (default: 100)")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save model after this many steps (default: 100)")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to store (default: 5)")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
tf.flags.DEFINE_string("last_layer", 'FC', "Use FC or GAP as the last layer")
class Utils:
@staticmethod
def evaluation(y_true, y_predict):
accuracy = accuracy_score(y_true, y_predict)
precision, recall, f1, support = precision_recall_fscore_support(y_true, y_predict)
print('accuracy:' + str(accuracy))
print('precision:' + str(precision))
print('recall:' + str(recall))
print('f1:' + str(f1))
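    # Example call (hypothetical labels): Utils.evaluation([0, 1, 1, 0], [0, 1, 0, 0]) prints one
    # accuracy value plus per-class precision/recall/f1 arrays, because
    # precision_recall_fscore_support is used without an `average` argument.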
def show_model_effect(self, history, model_path):
"""将训练过程中的评估指标变化可视化"""
# summarize history for accuracy
plt.plot(history.history["acc"])
plt.plot(history.history["val_acc"])
plt.title("Model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig(model_path+"/Performance_accuracy.jpg")
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig(model_path+"/Performance_loss.jpg")
class DataHelpers:
def flatten(self, l):
return [item for sublist in l for item in sublist]
def data_cleaning(self, text, remove_stop_words=False):
# Clean the text, with the option to remove stop_words and to stem words.
stop_words = [' ', '我', '你', '还', '会', '因为', '所以', '这', '是', '和', '他们',
'了', '的', '也', '哦', '这个', '啊', '说', '知道', '哪里', '吧', '哪家',
'想', '啥', '怎么', '呢', '那', '嘛', '么',
'有', '指', '楼主', '私信', '谁', '可能', '像', '这样', '到底', '哪个', '看', '我们',
'只能', '主要', '些', '认为', '肯定', '森', '来说', '觉得',
'确实', '一些', '而且', '一点', '比较', '个人', '感受', '适时', '开过',
'汉兰达', '森林人', '冠道', '昂科威', '楼兰',
'.', '。', ',', ',', '?', '?', '!', '!', ';', ';', ':', ':', '"', '\'', '“', '”',
'·', '~', '@', '#', '=', '+', '(', ')', '(', ')', '[', ']', '【', '】', '*', '&', '…', '^', '%',
]
# Clean the text
text = re.sub(r"[0-9]", " ", text)
# Remove punctuation from text
# text = ''.join([c for c in text if c not in punctuation])
# Optionally, remove stop words
if remove_stop_words:
text = text.split()
text = [w for w in text if not w in stop_words]
text = " ".join(text)
# Return a list of words
return text
def process_questions(self, question_list, df):
'''transform questions and display progress'''
for question in df['sentence_seq']:
question_list.append(self.text_to_wordlist(question, remove_stop_words=False))
if len(question_list) % 1000 == 0:
progress = len(question_list) / len(df) * 100
print("{} is {}% complete.".format('sentence sequence ', round(progress, 1)))
return question_list
def sentence_cut(self, data, dict=True):
sentence_seq = []
if dict:
jieba.load_userdict(FLAGS.dictionary)
for sentence in data['content']:
seg_list = jieba.cut(sentence, cut_all=False)
# print("Default Mode: " + "/ ".join(seg_list)) # 精确模式
sentence_seg = ' '.join(seg_list)
sentence_clean = self.data_cleaning(sentence_seg, remove_stop_words=True)
# print(sentence_clean)
sentence_seq.append(sentence_clean)
if len(sentence_seq) % 1000 == 0:
progress = len(sentence_seq) / len(data) * 100
print("{} is {}% complete.".format('sentence sequence ', round(progress, 1)))
data['sentence_seq'] = sentence_seq
# print(data['sentence_seq'])
return data
def batch_iter(self, data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int((len(data) - 1) / batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
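# A minimal, self-contained sketch (not part of the original pipeline; the toy arrays are
# assumptions) showing how batch_iter is consumed: it yields numpy slices of (x, y) pairs,
# one batch at a time, for the requested number of epochs.
def _demo_batch_iter():
    import numpy as np
    demo_x = np.arange(10)
    demo_y = np.arange(10) * 10
    helper = DataHelpers()
    for batch in helper.batch_iter(list(zip(demo_x, demo_y)), batch_size=4, num_epochs=1, shuffle=False):
        xs, ys = zip(*batch)  # each batch is an array of (x, y) pairs
        print(xs, ys)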
class Text_BiLSTM(object):
"""
A bidirectional LSTM for text classification.
Uses an embedding layer, followed by a bi-LSTM, mean pooling and a softmax output layer.
"""
def __init__(self, sequence_length, num_classes, vocab_size, embedding_size, pretrained_embedding=None, l2_reg_lambda=0.0):
self.sequence_length = sequence_length
self.num_classes = num_classes
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.pretrained_embedding = pretrained_embedding
self.l2_reg_lambda = l2_reg_lambda
# Placeholders for input, output and dropout
self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_right")
self.input_y = tf.placeholder(tf.float32, [None, self.num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
self.learning_rate = tf.placeholder(tf.float32, name="learning_rate")
self.embedding_placeholder = tf.placeholder(tf.float32, [self.vocab_size, self.embedding_size], name="pretrained_emb")
# with tf.device('/cpu:0'), tf.name_scope("embedding"):
# self.W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name="W_emb")
# print(self.W)
# self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
# self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# print(self.embedded_chars_expanded)
# h_conv1, pooled_2, pooled_3 = self.branch_am_cnn(self.embedded_chars_expanded)
self.lookup_layer_op()
self.biLSTM_layer_op()
# self.scores_o = self.project_layer_op()
# print(self.scores_o)
# self.h_pool_flat = tf.contrib.layers.flatten(pooled_3)
# print(self.h_pool_flat)
#
#
# # Add dropout
# with tf.name_scope("dropout1"):
# self.h_drop_1 = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# print(self.h_drop_1)
#
# with tf.name_scope("fc1"):
# W_fc1 = tf.get_variable("W_fc1", shape=[896, 128], initializer=tf.contrib.layers.xavier_initializer())
# b_fc1 = tf.Variable(tf.constant(0.1, shape=[128]), name="b_fc1")
# # self.l2_loss_fc1 += tf.nn.l2_loss(W_fc1)
# # self.l2_loss_fc1 += tf.nn.l2_loss(b_fc1)
# self.z_fc1 = tf.nn.xw_plus_b(self.h_drop_1, W_fc1, b_fc1, name="scores_fc1")
# self.o_fc1 = tf.nn.relu(self.z_fc1, name="relu_fc1")
#
# # Add dropout
# with tf.name_scope("dropout2"):
# self.h_drop_2 = tf.nn.dropout(self.o_fc1, self.dropout_keep_prob)
# print(self.h_drop_2)
# Final (unnormalized) scores and predictions
# with tf.name_scope("output"):
# # W_o = tf.get_variable("W_o", shape=[128, self.num_classes], initializer=tf.contrib.layers.xavier_initializer())
# # b_o = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b_o")
# # l2_loss += tf.nn.l2_loss(W_o)
# # l2_loss += tf.nn.l2_loss(b_o)
# # # self.scores_o = tf.reshape(self.h_drop_2, [-1, 128])
# # self.scores_o = tf.nn.xw_plus_b(self.h_drop_2, W_o, b_o, name="scores_o")
# self.predictions = tf.argmax(self.scores_o, 1, name="predictions")
# print(self.predictions)
#
# # Accuracy
# with tf.name_scope("accuracy"):
# correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
# self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
#
# # Calculate mean cross-entropy loss
# with tf.name_scope("loss"):
# losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores_o, labels=self.input_y)
# self.loss = tf.reduce_mean(losses) + self.l2_reg_lambda * l2_loss
def biLSTM_layer_op(self):
l2_loss = tf.constant(0.0)
with tf.variable_scope("bi-lstm"):
n_layers = 1
x = tf.transpose(self.word_embeddings, [1, 0, 2])
print('1111')
print(x)
# # Reshape to (n_steps*batch_size, n_input)
x = tf.reshape(x, [-1, self.embedding_size])
# # Split to get a list of 'n_steps' tensors of shape (batch_size, n_input)
# # x = tf.split(x, n_steps, 0)
x = tf.split(axis=0, num_or_size_splits=self.sequence_length, value=x)
print(x)
# Define lstm cells with tensorflow
# Forward direction cell
with tf.name_scope("fw_biLSTM"), tf.variable_scope("fw_biLSTM"):
print(tf.get_variable_scope().name)
# fw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
# lstm_fw_cell = rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout)
# lstm_fw_cell_m = rnn.MultiRNNCell([lstm_fw_cell]*n_layers, state_is_tuple=True)
def lstm_fw_cell():
fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forget_bias=1.0, state_is_tuple=True)
return tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=self.dropout_keep_prob)
# lstm_fw_cell_m = tf.contrib.rnn.MultiRNNCell([lstm_fw_cell() for _ in range(n_layers)], state_is_tuple=True)
fw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forget_bias=1.0, state_is_tuple=True)
print(fw_cell)
lstm_fw_cell_m = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=self.dropout_keep_prob)
# Backward direction cell
with tf.name_scope("bw_biLSTM"), tf.variable_scope("bw_biLSTM"):
# bw_cell = rnn.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
# lstm_bw_cell = rnn.DropoutWrapper(bw_cell, output_keep_prob=dropout)
# lstm_bw_cell_m = rnn.MultiRNNCell([lstm_bw_cell]*n_layers, state_is_tuple=True)
def lstm_bw_cell():
bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forget_bias=1.0, state_is_tuple=True)
return tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=self.dropout_keep_prob)
# lstm_bw_cell_m = tf.contrib.rnn.MultiRNNCell([lstm_bw_cell() for _ in range(n_layers)], state_is_tuple=True)
bw_cell = tf.contrib.rnn.BasicLSTMCell(FLAGS.hidden_dim, forget_bias=1.0, state_is_tuple=True)
lstm_bw_cell_m = tf.contrib.rnn.DropoutWrapper(bw_cell, output_keep_prob=self.dropout_keep_prob)
# Get lstm cell output
# try:
with tf.name_scope("full_biLSTM"), tf.variable_scope("full_biLSTM"):
# outputs, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
# self.output, _, _ = rnn.static_bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
output, state = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell_m, lstm_bw_cell_m, self.word_embeddings, dtype=tf.float32)
# outputs, _ = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x, dtype=tf.float32)
# except Exception: # Old TensorFlow version only returns outputs not states
# outputs = tf.nn.bidirectional_rnn(lstm_fw_cell_m, lstm_bw_cell_m, x,
# dtype=tf.float32)
print('2222')
print(output)
self.output = tf.concat(output, 2)
print(self.output)
# return outputs[-1]
# return outputs
with tf.name_scope("mean_pooling_layer"):
self.out_put = tf.reduce_mean(self.output, 1)
avg_pool = tf.nn.dropout(self.out_put, keep_prob=self.dropout_keep_prob)
print("pool", avg_pool)
with tf.name_scope('output'):
# bidirectional: forward and backward states are concatenated, hence 2*hidden_dim
W = tf.Variable(tf.truncated_normal([int(2*FLAGS.hidden_dim), self.num_classes], stddev=0.1), name='W')
b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name='b')
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.logits = tf.nn.xw_plus_b(avg_pool, W, b, name='scores')
self.y_pred_cls = tf.argmax(self.logits, 1, name='predictions')
with tf.name_scope("loss"):
# loss function: softmax cross-entropy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
self.loss = tf.reduce_mean(cross_entropy)+self.l2_reg_lambda * l2_loss
with tf.name_scope("accuracy"):
# accuracy
correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Define Training procedure
self.global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
grads_and_vars = optimizer.compute_gradients(self.loss)
self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)
# Keep track of gradient values and sparsity (optional)
grad_summaries = []
for g, v in grads_and_vars:
if g is not None:
grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
grad_summaries.append(grad_hist_summary)
grad_summaries.append(sparsity_summary)
self.grad_summaries_merged = tf.summary.merge(grad_summaries)
# Summaries for loss and accuracy
self.loss_summary = tf.summary.scalar("loss", self.loss)
self.acc_summary = tf.summary.scalar("accuracy", self.accuracy)
# Train Summaries
self.train_summary_op = tf.summary.merge([self.loss_summary, self.acc_summary, self.grad_summaries_merged])
# Dev summaries
self.dev_summary_op = tf.summary.merge([self.loss_summary, self.acc_summary])
def project_layer_op(self):
with tf.variable_scope("proj"):
W = tf.get_variable(name="W",
shape=[2 * FLAGS.hidden_dim, self.num_classes],
initializer=tf.contrib.layers.xavier_initializer(),
dtype=tf.float32)
b = tf.get_variable(name="b",
shape=[self.num_classes],
initializer=tf.zeros_initializer(),
dtype=tf.float32)
s = tf.shape(self.output)
# at this point output has shape [batch_size*sentence, 2*hidden_dim]
self.output = tf.reshape(self.output, [-1, 2*FLAGS.hidden_dim])
# pred has shape [batch_size*sentence, num_classes]
pred = tf.matmul(self.output, W) + b
# pred = tf.nn.tanh(pred, name='tanh_layer') # CT
# logits has shape [batch, sentence, num_classes]
self.logits = tf.reshape(pred, [-1, s[1], self.num_classes])
print(self.logits)
return self.logits
def lookup_layer_op(self):
with tf.variable_scope("words"):
# self._word_embeddings = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), dtype=tf.float32, trainable=True, name="W_emb")
# word_embeddings = tf.nn.embedding_lookup(params=self._word_embeddings, ids=self.input_x, name="word_embeddings")
self._word_embeddings = tf.Variable(self.pretrained_embedding, trainable=True, dtype=tf.float32, name="embedding")
word_embeddings = tf.nn.embedding_lookup(params=self._word_embeddings, ids=self.input_x, name="word_embeddings")
# W = tf.Variable(tf.constant(0.0, shape=[self.vocab_size, self.embedding_size]), trainable=True, name="W")
# self.embedding_init = W.assign(self.embedding_placeholder)
# word_embeddings = tf.nn.embedding_lookup(params=W, ids=self.input_x, name="word_embeddings")
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout_keep_prob)
class Train:
# def show_prediction(self):
# dev_batches = DataHelpers().batch_iter(list(zip(x_dev, y_dev)), FLAGS.batch_size, 1)
# total_dev_correct = 0
# total_dev_loss = 0
# print("\nEvaluation:")
# for dev_batch in dev_batches:
# x_dev_batch, y_dev_batch = zip(*dev_batch)
# loss, dev_correct = dev_step(x_dev_batch, y_dev_batch)
# total_dev_correct += dev_correct * len(y_dev_batch)
def load_word2vec(self, filename):
vocab = []
embd = []
file = open(filename, 'r', encoding='utf8')
print('Word2Vec start')
for line in file.readlines():
row = line.strip().split(' ')
vocab.append(row[0])
embd.append(row[1:])
# print(len(row[1:]))
print('Loaded Word2Vec!')
file.close()
return vocab, embd
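# Note on the expected file layout (inferred from the parsing above, stated here as an
# assumption): each line of the pretrained embedding file is "token v1 v2 ... vN" separated
# by single spaces, so vocab[i] holds the token and embd[i] the list of vector components
# (still as strings at this point).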
def generate_extra_samples(self, rows, num):
extra_samples = []
print(rows)
for i in range(num):
row = random.sample(rows, 1)
extra_samples.extend(row)
return extra_samples
def over_sampling(self, x_train, y_train, label_distribution, dic_label, prop=1):
print("shape before upsampling is {0}".format(x_train.shape))
x_upsample = copy.deepcopy(x_train)
y_upsample = copy.deepcopy(y_train)
shape_x = x_train.shape
most_label = label_distribution.index[0]
# most_label_count = label_distribution[0]
for other_label in label_distribution.index:
# print(other_label)
if other_label == most_label:
rows_valid = []
for row in range(shape_x[0]):
if (y_train[row, :] == dic_label[most_label]).all():
rows_valid.append(row)
most_label_count = len(rows_valid)
print("most label is {0}, count is {1}".format(most_label, most_label_count))
# x_upsample = np.append(x_upsample, x_train[rows_valid, :], axis=0)
# y_upsample = np.append(y_upsample, y_train[rows_valid, :], axis=0)
pass
else:
rows_valid = []
for row in range(shape_x[0]):
# print(y_train[row, :])
# print(dic_label[other_label])
if (y_train[row, :] == dic_label[other_label]).all():
rows_valid.append(row)
# extra_sample = random.sample(rows_valid, int(prop * (most_label_count-label_distribution[other_label])))
extra_sample = self.generate_extra_samples(rows_valid, int(prop * (most_label_count-len(rows_valid))))
print("original label count is {0}".format(label_distribution[other_label]))
print("extra label count is {0}".format(len(extra_sample)))
x_upsample = np.append(x_upsample, x_train[extra_sample, :], axis=0)
print("shape is {0}".format(x_upsample.shape))
y_upsample = np.append(y_upsample, y_train[extra_sample, :], axis=0)
# x_upsample = np.append(x_upsample, x_train, axis=0)
# y_upsample = np.append(y_upsample, y_train, axis=0)
shuffle_indices = np.random.permutation(np.arange(y_upsample.shape[0]))
x_upsample = x_upsample[shuffle_indices]
print("shape is {0}".format(x_upsample.shape))
y_upsample = y_upsample[shuffle_indices]
print("shape after upsampling is {0}".format(x_upsample.shape))
return x_upsample, y_upsample
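# Illustrative note on the expected arguments (an assumption based on how they are used above):
# label_distribution is typically pd.value_counts of the label column, dic_label maps each label
# string to its one-hot row (e.g. dic_label['动力_1'] has its 1 at index 20 under subject_dict below),
# and prop controls what fraction of the gap to the majority class is filled with resampled rows.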
def train(self, x_train, y_train, x_dev, y_dev, x_test, vocab_processor, vocab_size, embedding):
print("length of len(vocab_processor.vocabulary_) is {0}".format(vocab_size))
with tf.Graph().as_default():
self.lr = FLAGS.lr
session_conf = tf.ConfigProto(allow_soft_placement=FLAGS.allow_soft_placement, log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
# sess = tf.Session()
with sess.as_default():
# cnn = TextCNN(sequence_length=x_train.shape[1],
# num_classes=FLAGS.sentiment_class,
# vocab_size=len(vocab_processor.vocabulary_),
# embedding_size=FLAGS.embedding_dim)
cnn = Text_BiLSTM(sequence_length=x_train.shape[1],
num_classes=FLAGS.subject_sentiment_class,
# vocab_size=len(vocab_processor.vocabulary_),
vocab_size=vocab_size,
embedding_size=FLAGS.embedding_dim,
pretrained_embedding=embedding)
# train_op = tf.train.AdamOptimizer(learning_rate=FLAGS.lr, beta1=0.9, beta2=0.999,
# epsilon=1e-8).minimize(cnn.loss)
# Output directory for models and summaries
timestamp = str(int(time.time()))
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", timestamp))
print("Writing to {}\n".format(out_dir))
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.num_checkpoints)
# Write vocabulary
vocab_processor.save(os.path.join(out_dir, "vocab"))
# Initialize all variables
sess.run(tf.global_variables_initializer())
# sess.run(cnn.embedding_init, feed_dict={cnn.embedding_placeholder: embedding})
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: FLAGS.dropout_keep_prob,
cnn.learning_rate: self.lr
}
_, step, summaries, loss, accuracy = sess.run([cnn.train_op, cnn.global_step, cnn.train_summary_op, cnn.loss, cnn.accuracy], feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
train_summary_writer.add_summary(summaries, step)
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on a dev set
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: 1.0,
cnn.learning_rate: self.lr
}
step, summaries, loss, accuracy = sess.run([cnn.global_step, cnn.dev_summary_op, cnn.loss, cnn.accuracy], feed_dict)
time_str = datetime.datetime.now().isoformat()
# print("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
if writer:
writer.add_summary(summaries, step)
return loss, accuracy
# Generate batches
batches = DataHelpers().batch_iter(list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
# Training loop. For each batch...
for batch in batches:
x_batch, y_batch = zip(*batch)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, cnn.global_step)
if current_step % FLAGS.evaluate_every == 0:
dev_batches = DataHelpers().batch_iter(list(zip(x_dev, y_dev)), FLAGS.batch_size, 1)
total_dev_correct = 0
total_dev_loss = 0
print("\nEvaluation:")
for dev_batch in dev_batches:
x_dev_batch, y_dev_batch = zip(*dev_batch)
loss, dev_correct = dev_step(x_dev_batch, y_dev_batch)
total_dev_correct += dev_correct * len(y_dev_batch)
total_dev_loss += loss * len(y_dev_batch)
# dev_step(x_left_dev, x_right_dev, y_dev, writer=dev_summary_writer)
dev_accuracy = float(total_dev_correct) / len(y_dev)
dev_loss = float(total_dev_loss) / len(y_dev)
print('Accuracy on dev set: {0}, loss on dev set: {1}'.format(dev_accuracy, dev_loss))
print("Evaluation finished")
if current_step % FLAGS.checkpoint_every == 0:
path = saver.save(sess, checkpoint_prefix, global_step=current_step)
print("Saved model checkpoint to {}\n".format(path))
if current_step % 300 == 0:
self.lr = self.lr / 4
if current_step % 700 == 0:
break
feed_dict = {
cnn.input_x: x_dev,
cnn.dropout_keep_prob: 1.0,
}
y_pred = sess.run([cnn.y_pred_cls], feed_dict)
print(y_pred)
test = pd.read_csv(FLAGS.test_data_file, sep=",", error_bad_lines=False)
feed_dict = {
cnn.input_x: x_test,
cnn.dropout_keep_prob: 1.0,
}
y_pred = sess.run([cnn.y_pred_cls], feed_dict)
print(y_pred)
print(type(y_pred))
print(type(y_pred[0]))
print(type(y_pred[0].tolist()))
test['predict'] = y_pred[0].tolist()
test.to_csv(FLAGS.result_file, encoding='utf8', index=False)
# self.show_prediction()
def preprocess(self):
# read the training and test data
data = pd.read_csv(FLAGS.train_data_file, sep=",", error_bad_lines=False)
test = pd.read_csv(FLAGS.test_data_file, sep=",", error_bad_lines=False)
print(pd.value_counts(data['subject']))
print(pd.value_counts(data['sentiment_value']))
print(pd.value_counts(data['sentiment_word']))
# build a user dictionary from the sentiment words (the block below is commented out)
# sentiment_word = set(data['sentiment_word'])
# sentiment_word.remove(np.nan)
# with open(FLAGS.dictionary, 'w') as f:
# for word in sentiment_word:
# print(word)
# f.write(word+'\n')
# f.close()
# print("dictionary done!")
data = data.fillna('空')
test = test.fillna('空')
# word segmentation
data = DataHelpers().sentence_cut(data=data, dict=True)
test = DataHelpers().sentence_cut(data=test, dict=True)
# data[['sentence_seq']].to_csv('D:/Data/sentence/train.csv', encoding='utf8', index=False)
vocab, embd = self.load_word2vec(FLAGS.pretrained_word_emb)
vocab_size = len(vocab)
embedding_dim = len(embd[0])
embedding = np.asarray(embd, dtype=np.float32)  # cast the string values read from the text file to floats
print(embedding.shape)
# Build vocabulary
# max_document_length = max([len(x.split(" ")) for x in x_text])
max_document_length = FLAGS.sentence_len
# vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length, min_frequency=2)
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
# vocab_processor.fit(data['sentence_seq'])
print('vocab')
print(vocab)
vocab_processor.fit(vocab)
# x = np.array(list(vocab_processor.fit_transform(x_text)))
x = np.array(list(vocab_processor.transform(data['sentence_seq'])))
x_test = np.array(list(vocab_processor.transform(test['sentence_seq'])))
# subject_dict = {'动力': 0, '价格': 1, '油耗': 2, '操控': 3, '舒适性': 4, '配置': 5, '安全性': 6, '内饰': 7, '外观': 8, '空间': 9}
# subject_numerical = []
# for subject in data['subject']:
# subject_numerical.append(subject_dict[subject])
# y = to_categorical(data['sentiment_value'], num_classes=FLAGS.sentiment_class)
# y = to_categorical(subject_numerical, num_classes=FLAGS.subject_class)
subject_dict = {'动力_-1': 0, '价格_-1': 1, '油耗_-1': 2, '操控_-1': 3, '舒适性_-1': 4, '配置_-1': 5, '安全性_-1': 6, '内饰_-1': 7, '外观_-1': 8, '空间_-1': 9,
'动力_0': 10, '价格_0': 11, '油耗_0': 12, '操控_0': 13, '舒适性_0': 14, '配置_0': 15, '安全性_0': 16, '内饰_0': 17, '外观_0': 18, '空间_0': 19,
'动力_1': 20, '价格_1': 21, '油耗_1': 22, '操控_1': 23, '舒适性_1': 24, '配置_1': 25, '安全性_1': 26, '内饰_1': 27, '外观_1': 28, '空间_1': 29}
data['subject_senti'] = data['subject']+'_'+data['sentiment_value'].astype('str')
label_distribution = pd.value_counts(data['subject_senti'])  # api: pandas.value_counts
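# The preprocess method is cut off at this row boundary. The helper below is a hedged sketch
# (an assumption, not the author's code) of the label-encoding step it appears to be heading
# towards: map each subject_senti string through subject_dict and one-hot encode it.
def _encode_subject_sentiment_labels(subject_senti_series, subject_dict, num_classes):
    import numpy as np
    y = np.zeros((len(subject_senti_series), num_classes), dtype=np.float32)
    for row, label in enumerate(subject_senti_series):
        y[row, subject_dict[label]] = 1.0  # one-hot position given by the combined subject_sentiment id
    return y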
import numpy as np
import pandas as pd
from open_quant.labeling.multi_processing import mp_pandas
import sys
def test(a, b):
return a + b
def triple_barrier_method(close, events, pt_sl, molecule):
"""
Advances in Financial Machine Learning, Snippet 3.2, page 45.
Triple Barrier Labeling Method
Applies triple-barrier labeling method on time-series (molecule).
Returns DataFrame of timestamps of barrier touches.
:param close: (pd.Series) Close prices
:param events: (pd.Series) Event values calculated (CUSUM filter)
:param pt_sl: (np.array) Profit takin value 0; Stop loss value 1
:param molecule: (an array) Datetime index values
:return: (pd.DataFrame) Timestamps of when first barrier was touched
"""
# apply stop loss/profit taking, if it takes place before t1 (end of event)
events_ = events.loc[molecule]
out = events_[['t1']].copy(deep=True)
if pt_sl[0] > 0:
pt = pt_sl[0] * events_['target']
else:
pt = pd.Series(index=events.index) # NaNs
if pt_sl[1] > 0:
sl = -pt_sl[1] * events_['target']
else:
sl = pd.Series(index=events.index)  # NaNs (api: pandas.Series)
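# triple_barrier_method is cut off at this row boundary. In Snippet 3.2 of Advances in Financial
# Machine Learning the remainder scans each event's price path for the first barrier touch; the
# standalone helper below is a hedged sketch of that loop (it assumes events_ carries a 'side'
# column, which the truncated code above does not show).
def _first_touch_sketch(close, events_, out, pt, sl):
    for loc, t1 in events_['t1'].fillna(close.index[-1]).items():
        path = close[loc:t1]  # price path of this event
        path = (path / close[loc] - 1) * events_.at[loc, 'side']  # signed path returns
        out.loc[loc, 'sl'] = path[path < sl[loc]].index.min()  # earliest stop-loss touch
        out.loc[loc, 'pt'] = path[path > pt[loc]].index.min()  # earliest profit-taking touch
    return out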
import train_test_model
import pandas as pd
import numpy as np
#import math
import os, sys, time
from scipy.sparse import csr_matrix, save_npz, load_npz
import pickle
##########################################################################################
def usecases(predictions,item_vecs,model,movie_list=["Sliding Doors"],user_id=100,n_similar=20):
def predict_ratings(predictions,item_vecs,user_id):
item_vecs = predictions[1]
user_vec = predictions[0][user_id,:]
pred = user_vec.dot(item_vecs).toarray()[0].reshape(-1)
return pred
def similar_items(model,item_id,n_similar=10):
# Use implicit to get similar items.
movie_names = []
similar = model.similar_items(item_id, n_similar)
# Print the names of similar movies
for item in similar:
idx, rating = item
movie_names.append(movies.name.loc[movies.item_id == idx].iloc[0])
return movie_names
def recommendations(model,sparse_user_item,user_id=user_id):
# Use the implicit recommender.
recommended = model.recommend(user_id, sparse_user_item)
movies_recom = []
ratings_recom = []
# Get artist names from ids
for item in recommended:
idx, rating = item
movies_recom.append((movies.name.loc[movies.item_id == idx].iloc[0]))
ratings_recom.append(rating)
# Create a dataframe of artist names and scores
recommendations = pd.DataFrame({'movies': movies_recom, 'rating': ratings_recom})
return recommendations
#print("movie_list : ",movie_list, "User_id : ", user_id, "similar items : ", n_similar - 1)
predict_ratings = predict_ratings(predictions,item_vecs,user_id)
movies.name = movies.name.str.strip()
item_id = movies.item_id.loc[movies.name.isin(movie_list)].iloc[0]
similar_items = similar_items(als_model,item_id,n_similar)
recommendations = recommendations(als_model,sparse_user_item,user_id)
return predict_ratings, similar_items,recommendations
###########################################################################################
def main():
train_test_model.main()
movies = pd.read_pickle("./output/movies.pkl")  # api: pandas.read_pickle
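# main() is cut off at this row boundary. A hedged, commented-out sketch of how it might
# continue; every path and variable name below is an assumption about the artifacts that
# train_test_model appears to produce:
# sparse_user_item = load_npz("./output/sparse_user_item.npz")
# with open("./output/als_model.pkl", "rb") as f:
#     als_model = pickle.load(f)
# predictions, item_vecs = ...  # user/item factors from the trained model
# ratings, similar, recs = usecases(predictions, item_vecs, als_model, user_id=100)
# print(recs)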
import gzip
import math
import os
import time
from collections import OrderedDict, namedtuple
from datetime import datetime as dt
from datetime import timedelta
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook
import ujson
LOGS = './logs/'
Window = namedtuple('Window', 'pid name start_time last_update focus_time exe cmd')
Event = namedtuple('Event', 'time category text index')
SEC_PER_HOUR = 60*60
HOUR_FORMAT = SEC_PER_HOUR/10**3
DAY = pd.Timedelta('1 day')
HOUR = pd.Timedelta('1 hour')  # api: pandas.Timedelta
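# Minimal illustration (values are assumptions, not real log entries) of how the Window and
# Event records defined above are instantiated:
_example_window = Window(pid=1234, name="editor", start_time=dt(2020, 1, 1, 9, 0),
                         last_update=dt(2020, 1, 1, 9, 30), focus_time=timedelta(minutes=25),
                         exe="code.exe", cmd="code .")
_example_event = Event(time=dt(2020, 1, 1, 9, 5), category="keyboard", text="save", index=0)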
import pandas as pd
import os
import json
import matplotlib.pyplot as plt
import seaborn as sns
with open("config.json") as file:
config = json.load(file)
config["intermediate_data"] = "nextcloud-znes/KlimaSchiff/result_data/emissions"
#
# df_n = pd.read_csv(
# "/home/admin/klimaschiff/intermediate_data_new/2015_sq/ship_emissions/ship_emissions_20150107.csv",
# nrows=100000)
# df_o = pd.read_csv(
# "/home/admin/klimaschiff/intermediate_data/2015_sq/ship_emissions/ship_emissions_20150107.csv",
# nrows=100000)
# df_n[["Propulsion-NMVOC [kg]", "Electrical-NMVOC [kg]"]].sum().sum() / df_o[["Propulsion-NMVOC [kg]", "Electrical-NMVOC [kg]"]].sum().sum()
# df_n[["Propulsion-CO [kg]", "Electrical-CO [kg]"]].sum().sum() / df_o[["Propulsion-CO [kg]", "Electrical-CO [kg]"]].sum().sum()
# df_n[["Propulsion-BC [kg]", "Electrical-BC [kg]"]].sum().sum() / df_o[["Propulsion-BC [kg]", "Electrical-BC [kg]"]].sum().sum()
# pd.concat([df_o.sum(), df_n.sum()], axis=1)
# df = pd.read_csv(
# os.path.join(scenario_path, "total_emissions_by_type_and_day.csv",),
# parse_dates=True,
# )
# df_m = df.unstack(level=1).resample("M").sum().unstack().unstack(level=0).swaplevel(0,1)
categories = [
"Tanker",
"Bulker",
"Container",
"Cruise",
"Cargo",
"Ro-Ro",
"Ro-Pax",
"MPV",
"Car Carrier",
"Diverse",
]
def category(row):
check = [cat for cat in categories if cat in row]
if check:
return check[0]
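# Quick illustration (the ship-type strings are assumptions): category returns the first broad
# category from the list above that is contained in the row, e.g. category("Tanker_Handysize")
# -> "Tanker", and None when nothing matches, e.g. category("Ferry") -> None.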
# aggregated numbers yearly --------------------------------------------
# df_time_agg = df_time.reset_index().groupby("category").sum()
# df_time_agg.to_csv("tables/yearly_emissions_by_shiptype_and_pollutant_SQ.csv")
#
# plot_data = (
# df_time_agg.drop(["CO2", "NOx", "CO [kg]", "SOx"], axis=1)
# .stack()
# .reset_index()
# )
# sns.barplot(x="component", y=0, hue="category", data=plot_data)
# #
#
# ax = df_time_agg[["NOx", "CO [kg]", "SOx"]].divide(1e9).plot(kind="bar")
# ax.set_ylabel("CO2 in Mio ton")
# ax.set_xlabel("Ship type")
# plt.savefig("figures/results/Agg_CO2_emissions_SQ.pdf")
# -----------------------------------------------------------------------------
# scenario comparison
# ---------------------------------------------------------------------------
scenarios = ["2015_sq"]#, "2030_low", "2030_high", "2040_low", "2040_high"]
pollutants = [
i + " [kg]"
for i in ["SOx", "NOx", "PM", "CO", "CO2", "ASH", "POA", "NMVOC", "BC"]
]
d_annual = {}
d_daily = {}
for scenario in scenarios:
scenario_path = os.path.join(
os.path.expanduser("~"), config["intermediate_data"], scenario,
)
df = pd.read_csv(
os.path.join(scenario_path, "total_emissions_by_type_and_day_" + scenario + ".csv"),
parse_dates=True,
)
# group by category ("Bulker", "Tanker" etc)
df["Shipclass"] = df["Unnamed: 1"].apply(lambda x: category(x))
df["Unnamed: 0"] = pd.to_datetime(df["Unnamed: 0"], format="%Y%m%d")
# remove 2014 data from results data set
df = df.set_index("Unnamed: 0")["2015"].reset_index()
# df.set_index(["Unnamed: 0","Unnamed: 1"], inplace=True)
df_sums = df.groupby(["Unnamed: 0", "Shipclass"]).sum()
d_daily[scenario] = df_sums
df_annual_sums = df_sums.sum(level=1, axis=0).T
df_annual_sums["Pollutant"] = [
i.split("-")[1] for i in df_annual_sums.index
]
df_annual_sums["Engine"] = [i.split("-")[0] for i in df_annual_sums.index]
df_annual_sums["All"] = df_annual_sums.sum(axis=1)
d_annual[scenario] = df_annual_sums # .groupby("Pollutant").sum()
a = d_annual["2015_sq"].groupby("Pollutant").sum().loc[pollutants].T
a = a.div(1e6) # kg -> Gg
a.columns = [i.replace(" [kg]", "") for i in a.columns]  # drop the unit suffix; str.strip would remove characters, not the substring
a.to_latex(
"tables/annual_emissions_Gg_per_type_{}.tex".format(scenario),
label="tab:annual_emissions_Gg_per_type_{}".format(scenario),
caption="Annual emissions for each shiptype in Gg in the scenario {}.".format(
scenario
),
float_format="{:0.2f}".format,
)
# ----------------------------------------------------------------------------
# annual sums per per ship type
# ----------------------------------------------------------------------------
for scenario in d_daily:
_df = d_daily[scenario].T
_df["Pollutant"] = [row.split("-")[1] for row in df_annual_sums.index]
_df = _df.groupby("Pollutant").sum().loc[pollutants].T
tuples = _df.index.map(lambda x: (x[1], pd.to_datetime(x[0])))
_df.index = pd.MultiIndex.from_tuples(tuples, names=["class", "date"])
_df = _df.sum(level=0).div(1e6)
_df.loc["All"] = _df.sum()
_df.columns = [i.replace(" [kg]", " (Gg/year)") for i in _df.columns]
_df.sort_index(inplace=True)
_df.to_csv("tables/annual_emissions_Gg_per_type_{}.csv".format(scenario))
_df.to_latex(
"tables/annual_emissions_Gg_per_type_{}.tex".format(scenario),
label="tab:annual_emissions_Gg_per_type_{}".format(scenario),
caption="Annual emissions for each shiptype in Gg in the scenario: {}.".format(
scenario
),
float_format="{:0.0f}".format,
)
# ----------------------------------------------------------------------------
# timeseries plot average daily emissions
# ----------------------------------------------------------------------------
scenario = "2015_sq"
pollutant = "CO2 [kg]"
_df = d_daily[scenario].T
_df["Pollutant"] = [row.split("-")[1] for row in df_annual_sums.index]
_df = _df.groupby("Pollutant").sum().loc[pollutants].T
tuples = _df.index.map(lambda x: (x[1], pd.to_datetime(x[0])))  # api: pandas.to_datetime
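# The block is cut off at this row boundary. Mirroring the aggregation a few lines above, it
# presumably continues roughly as follows (a hedged, commented-out sketch):
# _df.index = pd.MultiIndex.from_tuples(tuples, names=["class", "date"])
# daily = _df[pollutant].unstack(level=0).div(1e6)  # Gg per day and ship class
# ax = daily.plot()
# ax.set_ylabel("CO2 in Gg/day")
# plt.savefig("figures/results/daily_{}_{}.pdf".format(pollutant.split(" ")[0], scenario))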
import nose
import warnings
import os
import datetime
import numpy as np
import sys
from distutils.version import LooseVersion
from pandas import compat
from pandas.compat import u, PY3
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, period_range, Index, Categorical)
from pandas.core.common import PerformanceWarning
from pandas.io.packers import to_msgpack, read_msgpack
import pandas.util.testing as tm
from pandas.util.testing import (ensure_clean,
assert_categorical_equal,
assert_frame_equal,
assert_index_equal,
assert_series_equal,
patch)
from pandas.tests.test_panel import assert_panel_equal
import pandas
from pandas import Timestamp, NaT, tslib
nan = np.nan
try:
import blosc # NOQA
except ImportError:
_BLOSC_INSTALLED = False
else:
_BLOSC_INSTALLED = True
try:
import zlib # NOQA
except ImportError:
_ZLIB_INSTALLED = False
else:
_ZLIB_INSTALLED = True
_multiprocess_can_split_ = False
def check_arbitrary(a, b):
if isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
assert(len(a) == len(b))
for a_, b_ in zip(a, b):
check_arbitrary(a_, b_)
elif isinstance(a, Panel):
assert_panel_equal(a, b)
elif isinstance(a, DataFrame):
assert_frame_equal(a, b)
elif isinstance(a, Series):
assert_series_equal(a, b)
elif isinstance(a, Index):
assert_index_equal(a, b)
elif isinstance(a, Categorical):
# Temp,
# Categorical.categories is changed from str to bytes in PY3
# maybe the same as GH 13591
if PY3 and b.categories.inferred_type == 'string':
pass
else:
tm.assert_categorical_equal(a, b)
elif a is NaT:
assert b is NaT
elif isinstance(a, Timestamp):
assert a == b
assert a.freq == b.freq
else:
assert(a == b)
class TestPackers(tm.TestCase):
def setUp(self):
self.path = '__%s__.msg' % tm.rands(10)
def tearDown(self):
pass
def encode_decode(self, x, compress=None, **kwargs):
with ensure_clean(self.path) as p:
to_msgpack(p, x, compress=compress, **kwargs)
return read_msgpack(p, **kwargs)
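# Small standalone illustration (not one of the original tests) of the round-trip pattern the
# helper above encapsulates: serialise with to_msgpack, read back with read_msgpack, compare.
def _roundtrip_example():
    df = DataFrame({'a': [1, 2, 3]})
    packed = df.to_msgpack()          # returns bytes when no path is given
    unpacked = read_msgpack(packed)   # reconstructs an equivalent DataFrame
    assert_frame_equal(df, unpacked)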
class TestAPI(TestPackers):
def test_string_io(self):
df = DataFrame(np.random.randn(10, 2))
s = df.to_msgpack(None)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
s = df.to_msgpack()
result = read_msgpack(compat.BytesIO(s))
tm.assert_frame_equal(result, df)
s = to_msgpack(None, df)
result = read_msgpack(s)
tm.assert_frame_equal(result, df)
with ensure_clean(self.path) as p:
s = df.to_msgpack()
fh = open(p, 'wb')
fh.write(s)
fh.close()
result = read_msgpack(p)
tm.assert_frame_equal(result, df)
def test_iterator_with_string_io(self):
dfs = [DataFrame(np.random.randn(10, 2)) for i in range(5)]
s = to_msgpack(None, *dfs)
for i, result in enumerate(read_msgpack(s, iterator=True)):
tm.assert_frame_equal(result, dfs[i])
def test_invalid_arg(self):
# GH10369
class A(object):
def __init__(self):
self.read = 0
tm.assertRaises(ValueError, read_msgpack, path_or_buf=None)
tm.assertRaises(ValueError, read_msgpack, path_or_buf={})
tm.assertRaises(ValueError, read_msgpack, path_or_buf=A())
class TestNumpy(TestPackers):
def test_numpy_scalar_float(self):
x = np.float32(np.random.rand())
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_scalar_complex(self):
x = np.complex64(np.random.rand() + 1j * np.random.rand())
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_scalar_float(self):
x = np.random.rand()
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_scalar_complex(self):
x = np.random.rand() + 1j * np.random.rand()
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_numpy_float(self):
x = [np.float32(np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_numpy_float_complex(self):
if not hasattr(np, 'complex128'):
raise nose.SkipTest('numpy cant handle complex128')
x = [np.float32(np.random.rand()) for i in range(5)] + \
[np.complex128(np.random.rand() + 1j * np.random.rand())
for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_list_float(self):
x = [np.random.rand() for i in range(5)]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
def test_list_float_complex(self):
x = [np.random.rand() for i in range(5)] + \
[(np.random.rand() + 1j * np.random.rand()) for i in range(5)]
x_rec = self.encode_decode(x)
self.assertTrue(np.allclose(x, x_rec))
def test_dict_float(self):
x = {'foo': 1.0, 'bar': 2.0}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_complex(self):
x = {'foo': 1.0 + 1.0j, 'bar': 2.0 + 2.0j}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_dict_numpy_float(self):
x = {'foo': np.float32(1.0), 'bar': np.float32(2.0)}
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_dict_numpy_complex(self):
x = {'foo': np.complex128(1.0 + 1.0j),
'bar': np.complex128(2.0 + 2.0j)}
x_rec = self.encode_decode(x)
self.assertEqual(x, x_rec)
for key in x:
self.assertEqual(type(x[key]), type(x_rec[key]))
def test_numpy_array_float(self):
# run multiple times
for n in range(10):
x = np.random.rand(10)
for dtype in ['float32', 'float64']:
x = x.astype(dtype)
x_rec = self.encode_decode(x)
tm.assert_almost_equal(x, x_rec)
def test_numpy_array_complex(self):
x = (np.random.rand(5) + 1j * np.random.rand(5)).astype(np.complex128)
x_rec = self.encode_decode(x)
self.assertTrue(all(map(lambda x, y: x == y, x, x_rec)) and
x.dtype == x_rec.dtype)
def test_list_mixed(self):
x = [1.0, np.float32(3.5), np.complex128(4.25), u('foo')]
x_rec = self.encode_decode(x)
# current msgpack cannot distinguish list/tuple
tm.assert_almost_equal(tuple(x), x_rec)
x_rec = self.encode_decode(tuple(x))
tm.assert_almost_equal(tuple(x), x_rec)
class TestBasic(TestPackers):
def test_timestamp(self):
for i in [Timestamp(
'20130101'), Timestamp('20130101', tz='US/Eastern'),
Timestamp('201301010501')]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_nat(self):
nat_rec = self.encode_decode(NaT)
self.assertIs(NaT, nat_rec)
def test_datetimes(self):
# fails under 2.6/win32 (np.datetime64 seems broken)
if LooseVersion(sys.version) < '2.7':
raise nose.SkipTest('2.6 with np.datetime64 is broken')
for i in [datetime.datetime(2013, 1, 1),
datetime.datetime(2013, 1, 1, 5, 1),
datetime.date(2013, 1, 1),
np.datetime64(datetime.datetime(2013, 1, 5, 2, 15))]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
def test_timedeltas(self):
for i in [datetime.timedelta(days=1),
datetime.timedelta(days=1, seconds=10),
np.timedelta64(1000000)]:
i_rec = self.encode_decode(i)
self.assertEqual(i, i_rec)
class TestIndex(TestPackers):
def setUp(self):
super(TestIndex, self).setUp()
self.d = {
'string': tm.makeStringIndex(100),
'date': tm.makeDateIndex(100),
'int': tm.makeIntIndex(100),
'rng': tm.makeRangeIndex(100),
'float': tm.makeFloatIndex(100),
'empty': Index([]),
'tuple': Index(zip(['foo', 'bar', 'baz'], [1, 2, 3])),
'period': Index(period_range('2012-1-1', freq='M', periods=3)),
'date2': Index(date_range('2013-01-1', periods=10)),
'bdate': Index(bdate_range('2013-01-02', periods=10)),
}
self.mi = {
'reg': MultiIndex.from_tuples([('bar', 'one'), ('baz', 'two'),
('foo', 'two'),
('qux', 'one'), ('qux', 'two')],
names=['first', 'second']),
}
def test_basic_index(self):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with no freq (GH5506)
i = Index([Timestamp('20130101'), Timestamp('20130103')])
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
# datetime with timezone
i = Index([Timestamp('20130101 9:00:00'), Timestamp(
'20130103 11:00:00')]).tz_localize('US/Eastern')
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_multi_index(self):
for s, i in self.mi.items():
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
def test_unicode(self):
i = tm.makeUnicodeIndex(100)
i_rec = self.encode_decode(i)
self.assert_index_equal(i, i_rec)
class TestSeries(TestPackers):
def setUp(self):
super(TestSeries, self).setUp()
self.d = {}
s = tm.makeStringSeries()
s.name = 'string'
self.d['string'] = s
s = tm.makeObjectSeries()
s.name = 'object'
self.d['object'] = s
s = Series(tslib.iNaT, dtype='M8[ns]', index=range(5))
self.d['date'] = s
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 2 +
[Timestamp('20130603', tz='CET')] * 3,
'G': [Timestamp('20130102', tz='US/Eastern')] * 5,
}
self.d['float'] = Series(data['A'])
self.d['int'] = Series(data['B'])
self.d['mixed'] = Series(data['E'])
self.d['dt_tz_mixed'] = Series(data['F'])
self.d['dt_tz'] = Series(data['G'])
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_series_equal(i, i_rec)
class TestCategorical(TestPackers):
def setUp(self):
super(TestCategorical, self).setUp()
self.d = {}
self.d['plain_str'] = Categorical(['a', 'b', 'c', 'd', 'e'])
self.d['plain_str_ordered'] = Categorical(['a', 'b', 'c', 'd', 'e'],
ordered=True)
self.d['plain_int'] = Categorical([5, 6, 7, 8])
self.d['plain_int_ordered'] = Categorical([5, 6, 7, 8], ordered=True)
def test_basic(self):
# run multiple times here
for n in range(10):
for s, i in self.d.items():
i_rec = self.encode_decode(i)
assert_categorical_equal(i, i_rec)
class TestNDFrame(TestPackers):
def setUp(self):
super(TestNDFrame, self).setUp()
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.],
'F': [Timestamp('20130102', tz='US/Eastern')] * 5,
'G': [Timestamp('20130603', tz='CET')] * 5,
'H': Categorical(['a', 'b', 'c', 'd', 'e']),
'I': Categorical(['a', 'b', 'c', 'd', 'e'], ordered=True),
}
self.frame = {
'float': DataFrame(dict(A=data['A'], B=Series(data['A']) + 1)),
'int': DataFrame(dict(A=data['B'], B=Series(data['B']) + 1)),
'mixed': DataFrame(data)}
self.panel = {
'float': Panel(dict(ItemA=self.frame['float'],
ItemB=self.frame['float'] + 1))}
def test_basic_frame(self):
for s, i in self.frame.items():
i_rec = self.encode_decode(i)
assert_frame_equal(i, i_rec)
def test_basic_panel(self):
for s, i in self.panel.items():
i_rec = self.encode_decode(i)
assert_panel_equal(i, i_rec)
def test_multi(self):
i_rec = self.encode_decode(self.frame)
for k in self.frame.keys():
assert_frame_equal(self.frame[k], i_rec[k])
l = tuple([self.frame['float'], self.frame['float'].A,
self.frame['float'].B, None])
l_rec = self.encode_decode(l)
check_arbitrary(l, l_rec)
# this is an oddity in that packed lists will be returned as tuples
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
l_rec = self.encode_decode(l)
self.assertIsInstance(l_rec, tuple)
check_arbitrary(l, l_rec)
def test_iterator(self):
l = [self.frame['float'], self.frame['float']
.A, self.frame['float'].B, None]
with ensure_clean(self.path) as path:
to_msgpack(path, *l)
for i, packed in enumerate(read_msgpack(path, iterator=True)):
check_arbitrary(packed, l[i])
def tests_datetimeindex_freq_issue(self):
# GH 5947
# inferring freq on the datetimeindex
df = DataFrame([1, 2, 3], index=date_range('1/1/2013', '1/3/2013'))
result = self.encode_decode(df)
assert_frame_equal(result, df)  # api: pandas.util.testing.assert_frame_equal
import ast
import json
import os
import sys
import uuid
import lxml
import networkx as nx
import pandas as pd
import geopandas as gpd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry import LineString, Polygon, Point
from genet.core import Network
from genet.inputs_handler import matsim_reader
from tests.test_outputs_handler_matsim_xml_writer import network_dtd, schedule_dtd
from genet.schedule_elements import Route, Service, Schedule
from genet.utils import plot, spatial
from genet.inputs_handler import read
from tests.fixtures import assert_semantically_equal, route, stop_epsg_27700, network_object_from_test_data, \
full_fat_default_config_path, correct_schedule, vehicle_definitions_config_path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
puma_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "network.xml"))
puma_schedule_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "schedule.xml"))
simplified_network = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "network.xml"))
simplified_schedule = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "schedule.xml"))
network_link_attrib_text_missing = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network_link_attrib_text_missing.xml"))
@pytest.fixture()
def network1():
n1 = Network('epsg:27700')
n1.add_node('101982',
{'id': '101982',
'x': '528704.1425925883',
'y': '182068.78193707118',
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n1.add_node('101986',
{'id': '101986',
'x': '528835.203274008',
'y': '182006.27331298392',
'lon': -0.14439428709377497,
'lat': 51.52228713323965,
's2_id': 5221390328605860387})
n1.add_link('0', '101982', '101986',
attribs={'id': '0',
'from': '101982',
'to': '101986',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390328605860387,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n1
@pytest.fixture()
def network2():
n2 = Network('epsg:4326')
n2.add_node('101982',
{'id': '101982',
'x': -0.14625948709424305,
'y': 51.52287873323954,
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n2.add_node('101990',
{'id': '101990',
'x': -0.14770188709624754,
'y': 51.5205729332399,
'lon': -0.14770188709624754,
'lat': 51.5205729332399,
's2_id': 5221390304444511271})
n2.add_link('0', '101982', '101990',
attribs={'id': '0',
'from': '101982',
'to': '101990',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390304444511271,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n2
def test_network_graph_initiates_as_not_simplififed():
n = Network('epsg:27700')
assert not n.graph.graph['simplified']
def test__repr__shows_graph_info_and_schedule_info():
n = Network('epsg:4326')
assert 'instance at' in n.__repr__()
assert 'graph' in n.__repr__()
assert 'schedule' in n.__repr__()
def test__str__shows_info():
n = Network('epsg:4326')
assert 'Graph info' in n.__str__()
assert 'Schedule info' in n.__str__()
def test_reproject_changes_x_y_values_for_all_nodes(network1):
network1.reproject('epsg:4326')
nodes = dict(network1.nodes())
correct_nodes = {
'101982': {'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'101986': {'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}}
target_change_log = pd.DataFrame(
{'timestamp': {3: '2020-07-09 19:50:51', 4: '2020-07-09 19:50:51'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'node', 4: 'node'}, 'old_id': {3: '101982', 4: '101986'},
'new_id': {3: '101982', 4: '101986'}, 'old_attributes': {
3: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'new_attributes': {
3: "{'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'diff': {3: [('change', 'x', ('528704.1425925883', -0.14625948709424305)),
('change', 'y', ('182068.78193707118', 51.52287873323954))],
4: [('change', 'x', ('528835.203274008', -0.14439428709377497)),
('change', 'y', ('182006.27331298392', 51.52228713323965))]}}
)
assert_semantically_equal(nodes, correct_nodes)
for i in [3, 4]:
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'old_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'old_attributes']))
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'new_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'new_attributes']))
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), target_change_log[cols_to_compare],
check_dtype=False)
def test_reproject_delegates_reprojection_to_schedules_own_method(network1, route, mocker):
mocker.patch.object(Schedule, 'reproject')
network1.schedule = Schedule(epsg='epsg:27700', services=[Service(id='id', routes=[route])])
network1.reproject('epsg:4326')
network1.schedule.reproject.assert_called_once_with('epsg:4326', 1)
def test_reproject_updates_graph_crs(network1):
network1.reproject('epsg:4326')
assert network1.graph.graph['crs'] == {'init': 'epsg:4326'}
def test_reprojecting_links_with_geometries():
n = Network('epsg:27700')
n.add_nodes({'A': {'x': -82514.72274, 'y': 220772.02798},
'B': {'x': -82769.25894, 'y': 220773.0637}})
n.add_links({'1': {'from': 'A', 'to': 'B',
'geometry': LineString([(-82514.72274, 220772.02798),
(-82546.23894, 220772.88254),
(-82571.87107, 220772.53339),
(-82594.92709, 220770.68385),
(-82625.33255, 220770.45579),
(-82631.26842, 220770.40158),
(-82669.7309, 220770.04349),
(-82727.94946, 220770.79793),
(-82757.38528, 220771.75412),
(-82761.82425, 220771.95614),
(-82769.25894, 220773.0637)])}})
n.reproject('epsg:2157')
geometry_coords = list(n.link('1')['geometry'].coords)
assert round(geometry_coords[0][0], 7) == 532006.5605980
assert round(geometry_coords[0][1], 7) == 547653.3751768
assert round(geometry_coords[-1][0], 7) == 531753.4315189
assert round(geometry_coords[-1][1], 7) == 547633.5224837
def test_adding_the_same_networks():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_the_same_networks_but_with_differing_projections():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right.reproject('epsg:4326')
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_node_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('20', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_link_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('10', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_multiindices():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', 0, attribs={'modes': ['walk', 'bike']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 2
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
assert n_left.graph['1']['2'][0] == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_disjoint_networks_with_unique_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('20', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('100', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {'10': {'id': '1', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 1},
'20': {'id': '2', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 2},
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954,
's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965,
's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'100': {'modes': ['walk'], 'from': '10', 'to': '20', 'id': '100'},
'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_disjoint_networks_with_clashing_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('2', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 4
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
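# Adding a simplified network to an unsimplified one is expected to raise a RuntimeError rather than
# silently mixing simplified and unsimplified links.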
def test_adding_simplified_network_to_unsimplified_network_throws_error():
    n = Network('epsg:27700')
    m = Network('epsg:27700')
m.graph.graph['simplified'] = True
with pytest.raises(RuntimeError) as error_info:
n.add(m)
assert "cannot add" in str(error_info.value)
def test_print_shows_info(mocker):
mocker.patch.object(Network, 'info')
n = Network('epsg:27700')
n.print()
n.info.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker):
mocker.patch.object(plot, 'plot_graph_routes')
n = Network('epsg:27700')
n.plot()
plot.plot_graph_routes.assert_called_once()
def test_plot_graph_delegates_to_util_plot_plot_graph(mocker):
mocker.patch.object(plot, 'plot_graph')
n = Network('epsg:27700')
n.plot_graph()
plot.plot_graph.assert_called_once()
def test_plot_schedule_delegates_to_util_plot_plot_non_routed_schedule_graph(mocker, network_object_from_test_data):
mocker.patch.object(plot, 'plot_non_routed_schedule_graph')
n = network_object_from_test_data
n.plot_schedule()
plot.plot_non_routed_schedule_graph.assert_called_once()
def test_attempt_to_simplify_already_simplified_network_throws_error():
n = Network('epsg:27700')
n.graph.graph["simplified"] = True
with pytest.raises(RuntimeError) as error_info:
n.simplify()
assert "cannot simplify" in str(error_info.value)
def test_simplifing_puma_network_results_in_correct_record_of_removed_links_and_expected_graph_data():
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
link_ids_pre_simplify = set(dict(n.links()).keys())
n.simplify()
assert n.is_simplified()
link_ids_post_simplify = set(dict(n.links()).keys())
assert link_ids_post_simplify & link_ids_pre_simplify
new_links = link_ids_post_simplify - link_ids_pre_simplify
deleted_links = link_ids_pre_simplify - link_ids_post_simplify
assert set(n.link_simplification_map.keys()) == deleted_links
assert set(n.link_simplification_map.values()) == new_links
assert (set(n.link_id_mapping.keys()) & new_links) == new_links
report = n.generate_validation_report()
assert report['routing']['services_have_routes_in_the_graph']
assert report['schedule']['schedule_level']['is_valid_schedule']
def test_simplified_network_saves_to_correct_dtds(tmpdir, network_dtd, schedule_dtd):
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
n.simplify()
n.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
generated_schedule_file_path = os.path.join(tmpdir, 'schedule.xml')
xml_obj = lxml.etree.parse(generated_schedule_file_path)
assert schedule_dtd.validate(xml_obj), \
        'Doc generated at {} is not valid against DTD due to {}'.format(generated_schedule_file_path,
schedule_dtd.error_log.filter_from_errors())
def test_simplifying_network_with_multi_edges_resulting_in_multi_paths():
n = Network('epsg:27700')
n.add_nodes({
'n_-1': {'x': -1, 'y': -1, 's2_id': -1},
'n_0': {'x': 0, 'y': 0, 's2_id': 0},
'n_1': {'x': 1, 'y': 1, 's2_id': 1},
'n_2': {'x': 2, 'y': 2, 's2_id': 2},
'n_3': {'x': 3, 'y': 3, 's2_id': 3},
'n_4': {'x': 4, 'y': 4, 's2_id': 4},
'n_5': {'x': 5, 'y': 5, 's2_id': 5},
'n_6': {'x': 6, 'y': 5, 's2_id': 6},
})
n.add_links({
'l_-1': {'from': 'n_-1', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_0': {'from': 'n_0', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_1': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_2': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_3': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_4': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_5': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_6': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_7': {'from': 'n_4', 'to': 'n_5', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_8': {'from': 'n_4', 'to': 'n_6', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}}
})
n.simplify()
assert set(n.link_simplification_map) == {'l_4', 'l_1', 'l_5', 'l_3', 'l_6', 'l_2'}
def test_reading_back_simplified_network():
    # simplified networks have an additional geometry attribute and some of their attributes are composite,
    # e.g. links now refer to a number of osm ways, each with a unique id
n = read.read_matsim(path_to_network=simplified_network, epsg='epsg:27700',
path_to_schedule=simplified_schedule)
number_of_simplified_links = 659
links_with_geometry = n.extract_links_on_edge_attributes(conditions={'geometry': lambda x: True})
assert len(links_with_geometry) == number_of_simplified_links
for link in links_with_geometry:
attribs = n.link(link)
if 'attributes' in attribs:
            assert 'geometry' not in attribs['attributes']
            for k, v in attribs['attributes'].items():
                if isinstance(v['text'], str):
                    assert ',' not in v['text']
def test_network_with_missing_link_attribute_elem_text_is_read_and_able_to_save_again(tmpdir):
n = read.read_matsim(path_to_network=network_link_attrib_text_missing, epsg='epsg:27700')
n.write_to_matsim(tmpdir)
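# `node_attribute_data_under_key(s)` / `link_attribute_data_under_key(s)` extract attribute values into
# pandas objects; nested attributes are addressed with dictionaries (e.g. {'a': 'b'}) and, when several
# keys are requested, flattened into '::'-joined column names such as 'key::nested_value::more_nested'.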
def test_node_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_node(1, {'a': {'b': 1}})
n.add_node(2, {'a': {'b': 4}})
output_series = n.node_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_node(1, {'b': 1})
n.add_node(2, {'b': 4})
output_series = n.node_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_keys(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'])
df_to_compare = pd.DataFrame({'x': {'101982': '528704.1425925883', '101986': '528835.203274008'},
'y': {'101982': '182068.78193707118', '101986': '182006.27331298392'}})
assert_frame_equal(df, df_to_compare)
def test_node_attribute_data_under_keys_with_named_index(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'], index_name='index')
assert df.index.name == 'index'
def test_node_attribute_data_under_keys_generates_key_for_nested_data(network1):
network1.add_node('1', {'key': {'nested_value': {'more_nested': 4}}})
df = network1.node_attribute_data_under_keys([{'key': {'nested_value': 'more_nested'}}])
assert isinstance(df, pd.DataFrame)
assert 'key::nested_value::more_nested' in df.columns
def test_node_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.node_attribute_data_under_keys(['x'], index_name='index')
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': {'b': 1}})
n.add_link('1', 1, 2, attribs={'a': {'b': 4}})
output_series = n.link_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'b': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
output_series = n.link_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({'0': 1, '1': 4}))
def test_link_attribute_data_under_keys(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'])
df_to_compare = pd.DataFrame({'modes': {'0': ['car']}, 'freespeed': {'0': 4.166666666666667},
'capacity': {'0': 600.0}, 'permlanes': {'0': 1.0}})
assert_frame_equal(df, df_to_compare)
def test_link_attribute_data_under_keys_with_named_index(network1):
df = network1.link_attribute_data_under_keys(['modes', 'freespeed', 'capacity', 'permlanes'], index_name='index')
assert df.index.name == 'index'
def test_link_attribute_data_under_keys_returns_dataframe_with_one_col_if_passed_one_key(network1):
df = network1.link_attribute_data_under_keys(['modes'])
assert isinstance(df, pd.DataFrame)
assert len(df.columns) == 1
def test_link_attribute_data_under_keys_generates_key_for_nested_data(network1):
df = network1.link_attribute_data_under_keys([{'attributes': {'osm:way:access': 'text'}}])
assert isinstance(df, pd.DataFrame)
assert 'attributes::osm:way:access::text' in df.columns
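# Adding nodes: single nodes via `add_node`, batches via `add_nodes`; ids that clash with existing nodes
# are reindexed and the returned dictionary maps the clashing ids to the ids actually used.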
def test_add_node_adds_node_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
assert n.graph.has_node(1)
assert n.node(1) == {'a': 1}
def test_add_node_adds_node_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_node(1)
assert n.node(1) == {}
assert n.graph.has_node(1)
def test_add_multiple_nodes():
n = Network('epsg:27700')
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {'x': 1, 'y': 2, 'id': 1}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert reindexing_dict == {}
def test_add_nodes_with_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {'x': 2, 'y': 2, 'id': 2}
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
def test_add_nodes_with_multiple_clashing_ids():
n = Network('epsg:27700')
n.add_node(1, {})
n.add_node(2, {})
assert n.graph.has_node(1)
assert n.node(1) == {}
assert n.graph.has_node(2)
assert n.node(2) == {}
reindexing_dict, actual_nodes_added = n.add_nodes({1: {'x': 1, 'y': 2}, 2: {'x': 2, 'y': 2}})
assert 1 in reindexing_dict
assert n.graph.has_node(reindexing_dict[1])
assert n.node(reindexing_dict[1]) == {'x': 1, 'y': 2, 'id': reindexing_dict[1]}
assert 2 in reindexing_dict
assert n.graph.has_node(reindexing_dict[2])
assert n.node(reindexing_dict[2]) == {'x': 2, 'y': 2, 'id': reindexing_dict[2]}
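# `add_edge` generates a link id itself and delegates to `add_link`; `add_edges`/`add_links` are the
# batch equivalents, with parallel edges between the same node pair tracked via `multi_edge_idx`.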
def test_add_edge_generates_a_link_id_and_delegated_to_add_link_id(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, None, {'a': 1}, False)
def test_add_edge_generates_a_link_id_with_specified_multiidx(mocker):
mocker.patch.object(Network, 'add_link')
mocker.patch.object(Network, 'generate_index_for_edge', return_value='12345')
n = Network('epsg:27700')
n.add_edge(1, 2, multi_edge_idx=10, attribs={'a': 1})
Network.generate_index_for_edge.assert_called_once()
Network.add_link.assert_called_once_with('12345', 1, 2, 10, {'a': 1}, False)
def test_adding_multiple_edges():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
if n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
elif n.link_id_mapping['1'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}:
assert n.link_id_mapping['0'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
else:
raise AssertionError()
def test_adding_multiple_edges_between_same_nodes():
n = Network('epsg:27700')
n.add_edges([{'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 1, 'to': 2}, {'from': 2, 'to': 3}])
assert n.graph.has_edge(1, 2)
assert n.graph.number_of_edges(1, 2) == 3
assert n.graph.has_edge(2, 3)
assert len(n.link_id_mapping) == 4
def test_add_link_adds_edge_to_graph_with_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.edge(1, 2) == {0: {'a': 1, 'from': 1, 'id': '0', 'to': 2}}
def test_add_link_adds_edge_to_graph_without_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
    assert n.graph.has_edge(1, 2)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
def test_adding_multiple_links():
n = Network('epsg:27700')
n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert n.graph.has_edge(1, 2)
assert n.graph.has_edge(2, 3)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.link_id_mapping['1'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
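# Link ids that clash with existing links are reindexed on addition; the returned dictionary maps the
# requested ids to the ids actually used, and multi-edge indices are bumped to avoid graph clashes.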
def test_adding_multiple_links_with_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
assert '0' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '1' in n.link_id_mapping
assert '0' in reindexing_dict
assert len(n.link_id_mapping) == 3
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs['1'], {'from': 2, 'to': 3, 'id': '1'})
def test_adding_multiple_links_with_multiple_id_clashes():
n = Network('epsg:27700')
n.add_link('0', 10, 20)
n.add_link('1', 10, 20)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links({'0': {'from': 1, 'to': 2}, '1': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 4
assert_semantically_equal(links_and_attribs[reindexing_dict['0']], {'from': 1, 'to': 2, 'id': reindexing_dict['0']})
assert_semantically_equal(links_and_attribs[reindexing_dict['1']], {'from': 2, 'to': 3, 'id': reindexing_dict['1']})
def test_adding_loads_of_multiple_links_between_same_nodes():
n = Network('epsg:27700')
reindexing_dict, links_and_attribs = n.add_links({i: {'from': 1, 'to': 2} for i in range(10)})
assert_semantically_equal(links_and_attribs, {i: {'from': 1, 'to': 2, 'id': i} for i in range(10)})
assert_semantically_equal(n.link_id_mapping, {i: {'from': 1, 'to': 2, 'multi_edge_idx': i} for i in range(10)})
def test_adding_multiple_links_with_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
n.add_links({'2': {'from': 1, 'to': 2}, '3': {'from': 1, 'to': 2}, '4': {'from': 2, 'to': 3}})
assert n.link_id_mapping['2'] == {'from': 1, 'to': 2, 'multi_edge_idx': 2}
assert n.link_id_mapping['3'] == {'from': 1, 'to': 2, 'multi_edge_idx': 3}
assert n.link_id_mapping['4'] == {'from': 2, 'to': 3, 'multi_edge_idx': 0}
def test_adding_multiple_links_with_id_and_multi_idx_clashes():
n = Network('epsg:27700')
n.add_link('0', 1, 2)
n.add_link('1', 1, 2)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
reindexing_dict, links_and_attribs = n.add_links(
{'0': {'from': 1, 'to': 2}, '1': {'from': 1, 'to': 2}, '2': {'from': 2, 'to': 3}})
assert '0' in reindexing_dict
assert '1' in reindexing_dict
assert len(n.link_id_mapping) == 5
assert_semantically_equal(n.link_id_mapping[reindexing_dict['0']], {'from': 1, 'to': 2, 'multi_edge_idx': 2})
assert_semantically_equal(n.link_id_mapping[reindexing_dict['1']], {'from': 1, 'to': 2, 'multi_edge_idx': 3})
def test_adding_multiple_links_missing_some_from_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_from_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'to': 2}, '1': {'to': 3}})
assert "You are trying to add links which are missing `from` (origin) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_some_to_nodes():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2, 'to': 3}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_multiple_links_missing_to_nodes_completely():
n = Network('epsg:27700')
with pytest.raises(RuntimeError) as error_info:
n.add_links({'0': {'from': 2}, '1': {'from': 2}})
assert "You are trying to add links which are missing `to` (destination) nodes" in str(error_info.value)
def test_adding_links_with_different_non_overlapping_attributes():
    # links with different, non-overlapping attributes can produce NaN values for the attributes they lack;
    # check that such NaNs do not end up in the final link attributes
n = Network('epsg:27700')
reindexing_dict, links_and_attributes = n.add_links({
'2': {'from': 1, 'to': 2, 'speed': 20},
'3': {'from': 1, 'to': 2, 'capacity': 123},
'4': {'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
assert reindexing_dict == {}
assert_semantically_equal(links_and_attributes, {
'2': {'id': '2', 'from': 1, 'to': 2, 'speed': 20},
'3': {'id': '3', 'from': 1, 'to': 2, 'capacity': 123},
'4': {'id': '4', 'from': 2, 'to': 3, 'modes': [1, 2, 3]}})
def test_adding_multiple_links_to_same_edge_clashing_with_existing_edge():
n = Network('epsg:27700')
n.add_link(link_id='0', u='2', v='2', attribs={'speed': 20})
n.add_links({'1': {'from': '2', 'to': '2', 'something': 20},
'2': {'from': '2', 'to': '2', 'capacity': 123}})
assert_semantically_equal(dict(n.links()), {'0': {'speed': 20, 'from': '2', 'to': '2', 'id': '0'},
'1': {'from': '2', 'to': '2', 'something': 20.0, 'id': '1'},
'2': {'from': '2', 'to': '2', 'capacity': 123.0, 'id': '2'}})
assert_semantically_equal(n.link_id_mapping, {'0': {'from': '2', 'to': '2', 'multi_edge_idx': 0},
'1': {'from': '2', 'to': '2', 'multi_edge_idx': 1},
'2': {'from': '2', 'to': '2', 'multi_edge_idx': 2}})
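# Modal filtering: links carry a 'modes' attribute, and subgraphs, link sets and node sets can be
# extracted by one or more modes.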
def test_network_modal_subgraph_using_general_subgraph_on_link_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.subgraph_on_link_conditions(conditions={'modes': 'car'}, mixed_dtypes=True)
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={})
assert n.modes() == {'car', 'bike'}
def test_network_modal_subgraph_using_specific_modal_subgraph_method_single_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
car_graph = n.modal_subgraph(modes='car')
assert list(car_graph.edges) == [(1, 2, 0), (2, 3, 0)]
def test_network_modal_subgraph_using_specific_modal_subgraph_method_several_modes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_bike_graph = n.modal_subgraph(modes=['car', 'bike'])
assert list(car_bike_graph.edges) == [(1, 2, 0), (2, 3, 0), (2, 3, 1)]
def test_links_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_links = n.links_on_modal_condition(modes=['car'])
assert set(car_links) == {'0', '1'}
def test_nodes_on_modal_condition():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike']})
n.add_link('1', 2, 3, attribs={'modes': ['car']})
n.add_link('2', 2, 3, attribs={'modes': ['bike']})
n.add_link('3', 2, 3, attribs={'modes': ['walk']})
car_nodes = n.nodes_on_modal_condition(modes=['car'])
assert set(car_nodes) == {1, 2, 3}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
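# Spatial conditions accept a geojson file path, a shapely geometry (in lat/lon) or a comma-separated
# string of S2 cell tokens. For links, `how='intersect'` (which appears to be the default) keeps links
# touching the region, while `how='within'` keeps only links fully contained in it.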
def test_nodes_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(test_geojson)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_nodes_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node(
'1', {'id': '1', 'x': 508400, 'y': 162050, 's2_id': spatial.generate_index_s2(51.3472033, 0.4449167)})
nodes = network_object_from_test_data.nodes_on_spatial_condition(region)
assert set(nodes) == {'21667818', '25508485'}
def test_links_on_spatial_condition_with_geojson(network_object_from_test_data):
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(test_geojson)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_shapely_geom(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region)
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_intersection_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='intersect')
assert set(links) == {'1', '2'}
def test_links_on_spatial_condition_with_containment(network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containment_and_complex_geometry_that_falls_outside_region(
network_object_from_test_data):
region = Polygon([(-0.1487016677856445, 51.52556684350165), (-0.14063358306884766, 51.5255134425896),
(-0.13865947723388672, 51.5228700191647), (-0.14093399047851562, 51.52006622056997),
(-0.1492595672607422, 51.51974577545329), (-0.1508045196533203, 51.52276321095246),
(-0.1487016677856445, 51.52556684350165)])
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containment_and_s2_region(network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_node('1', {'id': '1', 'x': 508400, 'y': 162050})
network_object_from_test_data.add_link('2', u='21667818', v='1')
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
def test_links_on_spatial_condition_with_containment_and_complex_geometry_that_falls_outside_s2_region(
network_object_from_test_data):
region = '48761ad04d,48761ad054,48761ad05c,48761ad061,48761ad085,48761ad08c,48761ad094,48761ad09c,48761ad0b,48761ad0d,48761ad0f,48761ad14,48761ad182c,48761ad19c,48761ad1a4,48761ad1ac,48761ad1b4,48761ad1bac,48761ad3d7f,48761ad3dc,48761ad3e4,48761ad3ef,48761ad3f4,48761ad3fc,48761ad41,48761ad43,48761ad5d,48761ad5e4,48761ad5ec,48761ad5fc,48761ad7,48761ad803,48761ad81c,48761ad824,48761ad82c,48761ad9d,48761ad9e4,48761ad9e84,48761ad9fc,48761ada04,48761ada0c,48761b2804,48761b2814,48761b281c,48761b283,48761b2844,48761b284c,48761b2995,48761b29b4,48761b29bc,48761b29d,48761b29f,48761b2a04'
network_object_from_test_data.add_link(
'2', u='21667818', v='25508485',
attribs={'geometry': LineString(
[(528504.1342843144, 182155.7435136598), (508400, 162050), (528489.467895946, 182206.20303669578)])})
links = network_object_from_test_data.links_on_spatial_condition(region, how='within')
assert set(links) == {'1'}
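# Shortest path queries can be restricted to a mode, run against a pre-computed (modal) subgraph, or
# default to the full graph; results are link ids unless `return_nodes=True`.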
def test_find_shortest_path_when_graph_has_no_extra_edge_choices():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_subgraph_is_pre_computed():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'length': 1})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'length': 1})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'length': 1})
bike_g = n.modal_subgraph(modes='bike')
bike_route = n.find_shortest_path(1, 3, subgraph=bike_g)
assert bike_route == ['0', '2']
def test_find_shortest_path_defaults_to_full_graph():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1})
n.add_link('1', 2, 3, attribs={'modes': ['car'], 'freespeed': 3})
n.add_link('2', 2, 3, attribs={'modes': ['bike'], 'freespeed': 2})
n.add_link('3', 2, 3, attribs={'modes': ['walk'], 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3)
assert bike_route == ['0', '1']
def test_find_shortest_path_when_graph_has_extra_edge_choice_for_freespeed_that_is_obvious():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '2']
def test_find_shortest_path_when_graph_has_extra_edge_choice_with_attractive_mode():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('3', 2, 3, attribs={'modes': ['bike'], 'length': 1, 'freespeed': 1})
bike_route = n.find_shortest_path(1, 3, modes='bike')
assert bike_route == ['0', '3']
def test_find_shortest_path_and_return_just_nodes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
n.add_link('1', 2, 3, attribs={'modes': ['car', 'bike'], 'length': 1, 'freespeed': 10})
bike_route = n.find_shortest_path(1, 3, return_nodes=True)
assert bike_route == [1, 2, 3]
def test_add_link_adds_link_with_specific_multi_idx():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
assert '0' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
def test_add_link_generates_new_multi_idx_if_already_exists():
n = Network('epsg:27700')
n.add_link('0', 1, 2, 0)
n.add_link('1', 1, 2, 0)
assert '0' in n.link_id_mapping
assert '1' in n.link_id_mapping
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert n.graph[1][2][0] == {'from': 1, 'to': 2, 'id': '0'}
assert n.link_id_mapping['1']['multi_edge_idx'] != 0
assert n.graph[1][2][n.link_id_mapping['1']['multi_edge_idx']] == {'from': 1, 'to': 2, 'id': '1'}
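# Reindexing nodes/links updates the graph, `link_id_mapping` and affected links' 'from'/'to' references,
# and records 'modify' events (old/new attributes plus a diff) in the change log.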
def test_reindex_node(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '007')
assert [id for id, attribs in network1.nodes()] == ['007', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '007'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('007', '101986')]
assert network1.link_id_mapping['0']['from'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:39:08', 4: '2020-06-08 19:39:08', 5: '2020-06-08 19:39:08'},
'change_event': {3: 'modify', 4: 'modify', 5: 'modify'}, 'object_type': {3: 'link', 4: 'node', 5: 'node'},
'old_id': {3: '0', 4: '101982', 5: '101982'}, 'new_id': {3: '0', 4: '007', 5: '101982'}, 'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'new_attributes': {
3: "{'id': '0', 'from': '007', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
5: "{'id': '007', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}"},
'diff': {3: [('change', 'from', ('101982', '007'))],
4: [('change', 'id', ('101982', '007')), ('change', 'id', ('101982', '007'))],
5: [('change', 'id', ('101982', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(3), correct_change_log_df[cols_to_compare],
check_names=False,
check_dtype=False)
def test_reindex_node_when_node_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.link_id_mapping['0']['from'] == '101982'
network1.reindex_node('101982', '101986')
node_ids = [id for id, attribs in network1.nodes()]
assert '101986' in node_ids
assert '101982' not in node_ids
assert len(set(node_ids)) == 2
assert network1.node(node_ids[0]) != network1.node(node_ids[1])
def test_reindex_link(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert '0' in network1.link_id_mapping
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '0'
network1.reindex_link('0', '007')
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['007']
assert '0' not in network1.link_id_mapping
assert '007' in network1.link_id_mapping
assert network1.link('007')['from'] == '101982'
assert network1.link('007')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
assert network1.edge('101982', '101986')[0]['id'] == '007'
correct_change_log_df = pd.DataFrame(
{'timestamp': {3: '2020-06-08 19:34:48', 4: '2020-06-08 19:34:48'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'link', 4: 'link'}, 'old_id': {3: '0', 4: '0'}, 'new_id': {3: '007', 4: '0'},
'old_attributes': {
3: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'new_attributes': {
3: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}",
4: "{'id': '007', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0, 'permlanes': 1.0, 'oneway': '1', 'modes': ['car'], 's2_from': 5221390329378179879, 's2_to': 5221390328605860387, 'length': 52.765151087870265, 'attributes': {'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'}, 'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String', 'text': 'unclassified'}, 'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'}, 'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String', 'text': 'Brunswick Place'}}}"},
'diff': {3: [('change', 'id', ('0', '007')), ('change', 'id', ('0', '007'))],
4: [('change', 'id', ('0', '007'))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_names=False, check_dtype=False)
def test_reindex_link_when_link_id_already_exists(network1):
assert [id for id, attribs in network1.nodes()] == ['101982', '101986']
assert [id for id, attribs in network1.links()] == ['0']
assert network1.link('0')['from'] == '101982'
assert network1.link('0')['to'] == '101986'
assert [(from_n, to_n) for from_n, to_n, attribs in network1.edges()] == [('101982', '101986')]
network1.add_link('1', '101986', '101982', attribs={})
network1.reindex_link('0', '1')
link_ids = [id for id, attribs in network1.links()]
assert '1' in link_ids
assert '0' not in link_ids
assert len(set(link_ids)) == 2
assert network1.link(link_ids[0]) != network1.link(link_ids[1])
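# `apply_attributes_to_node(s)` merges new attributes into the existing ones (overwriting on key
# clashes) and records the change in the change log.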
def test_modify_node_adds_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'b': 1})
assert n.node(1) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 1, 'b': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_names=False,
check_dtype=False)
def test_modify_node_overwrites_existing_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.apply_attributes_to_node(1, {'a': 4})
assert n.node(1) == {'a': 4}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-05-28 13:49:53', 1: '2020-05-28 13:49:53'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'node', 1: 'node'}, 'old_id': {0: None, 1: 1}, 'new_id': {0: 1, 1: 1},
'old_attributes': {0: None, 1: "{'a': 1}"}, 'new_attributes': {0: "{'a': 1}", 1: "{'a': 4}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('change', 'a', (1, 4))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def test_modify_nodes_adds_and_changes_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_node(1, {'a': 1})
n.add_node(2, {'b': 1})
n.apply_attributes_to_nodes({1: {'a': 4}, 2: {'a': 1}})
assert n.node(1) == {'a': 4}
assert n.node(2) == {'b': 1, 'a': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-01 15:07:51', 1: '2020-06-01 15:07:51', 2: '2020-06-01 15:07:51',
3: '2020-06-01 15:07:51'}, 'change_event': {0: 'add', 1: 'add', 2: 'modify', 3: 'modify'},
'object_type': {0: 'node', 1: 'node', 2: 'node', 3: 'node'}, 'old_id': {0: None, 1: None, 2: 1, 3: 2},
'new_id': {0: 1, 1: 2, 2: 1, 3: 2}, 'old_attributes': {0: None, 1: None, 2: "{'a': 1}", 3: "{'b': 1}"},
'new_attributes': {0: "{'a': 1}", 1: "{'b': 1}", 2: "{'a': 4}", 3: "{'b': 1, 'a': 1}"},
'diff': {0: [('add', '', [('a', 1)]), ('add', 'id', 1)], 1: [('add', '', [('b', 1)]), ('add', 'id', 2)],
2: [('change', 'a', (1, 4))], 3: [('add', '', [('a', 1)])]}
})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def multiply_node_attribs(node_attribs):
return node_attribs['a'] * node_attribs['c']
def test_apply_function_to_nodes():
n = Network('epsg:27700')
n.add_node('0', attribs={'a': 2, 'c': 3})
n.add_node('1', attribs={'c': 100})
n.apply_function_to_nodes(function=multiply_node_attribs, location='new_computed_attrib')
assert_semantically_equal(dict(n.nodes()),
{'0': {'a': 2, 'c': 3, 'new_computed_attrib': 6},
'1': {'c': 100}})
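# `apply_attributes_to_edge(s)` targets all parallel (multi)edges between a pair of nodes at once;
# optional `conditions` restrict which of those edges get modified.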
def test_apply_attributes_to_edge_without_filter_conditions():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.apply_attributes_to_edge(1, 2, {'c': 1})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1', 'c': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {2: '2020-07-10 14:53:25', 3: '2020-07-10 14:53:25'}, 'change_event': {2: 'modify', 3: 'modify'},
'object_type': {2: 'edge', 3: 'edge'}, 'old_id': {2: '(1, 2, 0)', 3: '(1, 2, 1)'},
'new_id': {2: '(1, 2, 0)', 3: '(1, 2, 1)'},
'old_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}", 3: "{'b': 1, 'from': 1, 'to': 2, 'id': '1'}"},
'new_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}",
3: "{'b': 1, 'from': 1, 'to': 2, 'id': '1', 'c': 1}"},
'diff': {2: [('add', '', [('c', 1)])], 3: [('add', '', [('c', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_dtype=False)
def test_apply_attributes_to_edge_with_filter_conditions():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.apply_attributes_to_edge(1, 2, {'c': 1}, conditions={'a': (0, 2)})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1'}
correct_change_log_df = pd.DataFrame(
{'timestamp': {2: '2020-07-10 14:53:25'}, 'change_event': {2: 'modify'},
'object_type': {2: 'edge'}, 'old_id': {2: '(1, 2, 0)'},
'new_id': {2: '(1, 2, 0)'},
'old_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}"},
'new_attributes': {2: "{'a': 1, 'from': 1, 'to': 2, 'id': '0', 'c': 1}"},
'diff': {2: [('add', '', [('c', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(1), correct_change_log_df[cols_to_compare],
check_dtype=False)
def test_apply_attributes_to_multiple_edges():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.add_link('2', 2, 3, attribs={'c': 1})
n.add_link('3', 2, 3, attribs={'d': 1})
n.apply_attributes_to_edges({(1, 2): {'e': 1}, (2, 3): {'f': 1}})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'e': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1', 'e': 1}
assert n.link('2') == {'c': 1, 'from': 2, 'to': 3, 'id': '2', 'f': 1}
assert n.link('3') == {'d': 1, 'from': 2, 'to': 3, 'id': '3', 'f': 1}
def test_apply_attributes_to_multiple_edges_with_conditions():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 1})
n.add_link('2', 2, 3, attribs={'c': 1})
n.add_link('3', 2, 3, attribs={'d': 1})
n.apply_attributes_to_edges({(1, 2): {'e': 1}, (2, 3): {'f': 1}}, conditions=[{'a': (0, 2)}, {'c': (0, 2)}])
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'e': 1}
assert n.link('1') == {'b': 1, 'from': 1, 'to': 2, 'id': '1'}
assert n.link('2') == {'c': 1, 'from': 2, 'to': 3, 'id': '2', 'f': 1}
assert n.link('3') == {'d': 1, 'from': 2, 'to': 3, 'id': '3'}
def test_modify_link_adds_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.apply_attributes_to_link('0', {'b': 1})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'b': 1}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-12 20:02:49', 1: '2020-06-12 20:02:49'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'link', 1: 'link'}, 'old_id': {0: None, 1: '0'}, 'new_id': {0: '0', 1: '0'},
'old_attributes': {0: None, 1: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}"},
'new_attributes': {0: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}",
1: "{'a': 1, 'from': 1, 'to': 2, 'id': '0', 'b': 1}"},
'diff': {0: [('add', '', [('a', 1), ('from', 1), ('to', 2), ('id', '0')]), ('add', 'id', '0')],
1: [('add', '', [('b', 1)])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def test_modify_link_overwrites_existing_attributes_in_the_graph_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.apply_attributes_to_link('0', {'a': 4})
assert n.link('0') == {'a': 4, 'from': 1, 'to': 2, 'id': '0'}
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-06-12 20:04:23', 1: '2020-06-12 20:04:23'}, 'change_event': {0: 'add', 1: 'modify'},
'object_type': {0: 'link', 1: 'link'}, 'old_id': {0: None, 1: '0'}, 'new_id': {0: '0', 1: '0'},
'old_attributes': {0: None, 1: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}"},
'new_attributes': {0: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}", 1: "{'a': 4, 'from': 1, 'to': 2, 'id': '0'}"},
'diff': {0: [('add', '', [('a', 1), ('from', 1), ('to', 2), ('id', '0')]), ('add', 'id', '0')],
1: [('change', 'a', (1, 4))]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare], correct_change_log_df[cols_to_compare], check_dtype=False)
def test_modify_link_adds_attributes_in_the_graph_with_multiple_edges():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'c': 100})
n.apply_attributes_to_link('0', {'b': 1})
assert n.link('0') == {'a': 1, 'from': 1, 'to': 2, 'id': '0', 'b': 1}
assert n.link('1') == {'c': 100, 'from': 1, 'to': 2, 'id': '1'}
def test_modify_links_adds_and_changes_attributes_in_the_graph_with_multiple_edges_and_change_is_recorded_by_change_log():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': {'b': 1}})
n.add_link('1', 1, 2, attribs={'c': 100})
n.apply_attributes_to_links({'0': {'a': {'b': 100}}, '1': {'a': {'b': 10}}})
assert n.link('0') == {'a': {'b': 100}, 'from': 1, 'to': 2, 'id': '0'}
assert n.link('1') == {'c': 100, 'from': 1, 'to': 2, 'id': '1', 'a': {'b': 10}}
correct_change_log_df = pd.DataFrame(
{'timestamp': {2: '2020-06-12 19:59:40', 3: '2020-06-12 19:59:40'}, 'change_event': {2: 'modify', 3: 'modify'},
'object_type': {2: 'link', 3: 'link'}, 'old_id': {2: '0', 3: '1'}, 'new_id': {2: '0', 3: '1'},
'old_attributes': {2: "{'a': {'b': 1}, 'from': 1, 'to': 2, 'id': '0'}",
3: "{'c': 100, 'from': 1, 'to': 2, 'id': '1'}"},
'new_attributes': {2: "{'a': {'b': 100}, 'from': 1, 'to': 2, 'id': '0'}",
3: "{'c': 100, 'from': 1, 'to': 2, 'id': '1', 'a': {'b': 10}}"},
'diff': {2: [('change', 'a.b', (1, 100))], 3: [('add', '', [('a', {'b': 10})])]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(2), correct_change_log_df[cols_to_compare],
check_dtype=False)
def multiply_link_attribs(link_attribs):
return link_attribs['a'] * link_attribs['c']
def test_apply_function_to_links():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 2, 'c': 3})
n.add_link('1', 1, 2, attribs={'c': 100})
n.apply_function_to_links(function=multiply_link_attribs, location='new_computed_attrib')
assert_semantically_equal(dict(n.links()),
{'0': {'a': 2, 'c': 3, 'from': 1, 'to': 2, 'id': '0', 'new_computed_attrib': 6},
'1': {'c': 100, 'from': 1, 'to': 2, 'id': '1'}})
def test_resolves_link_id_clashes_by_mapping_clashing_link_to_a_new_id(mocker):
mocker.patch.object(Network, 'generate_index_for_edge', return_value='1')
n = Network('epsg:27700')
n.add_link('0', 1, 2)
assert n.graph.has_edge(1, 2)
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert '1' not in n.link_id_mapping
n.add_link('0', 3, 0)
assert n.graph.has_edge(3, 0)
assert n.link_id_mapping['1'] == {'from': 3, 'to': 0, 'multi_edge_idx': 0}
# also assert that the link mapped to '0' is still as expected
assert n.link_id_mapping['0'] == {'from': 1, 'to': 2, 'multi_edge_idx': 0}
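# Removing nodes/links drops them from the graph and `link_id_mapping` and logs 'remove' events in the
# change log.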
def test_removing_single_node():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
n.add_link('2', 2, 3, attribs={'a': 1})
n.add_link('3', 2, 3, attribs={'b': 4})
n.remove_node(1)
assert list(n.graph.nodes) == [2, 3]
assert list(n.graph.edges) == [(2, 3, 0), (2, 3, 1)]
correct_change_log = pd.DataFrame(
{'timestamp': {4: '2020-06-11 10:37:54'}, 'change_event': {4: 'remove'}, 'object_type': {4: 'node'},
'old_id': {4: 1}, 'new_id': {4: None}, 'old_attributes': {4: '{}'}, 'new_attributes': {4: None},
'diff': {4: [('remove', 'id', 1)]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(1), correct_change_log[cols_to_compare],
check_dtype=False)
def test_removing_multiple_nodes():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
n.add_link('2', 2, 3, attribs={'a': 1})
n.add_link('3', 2, 3, attribs={'b': 4})
n.remove_nodes([1, 2])
assert list(n.graph.nodes) == [3]
assert list(n.graph.edges) == []
correct_change_log = pd.DataFrame(
{'timestamp': {4: '2020-06-11 10:39:52', 5: '2020-06-11 10:39:52'}, 'change_event': {4: 'remove', 5: 'remove'},
'object_type': {4: 'node', 5: 'node'}, 'old_id': {4: 1, 5: 2}, 'new_id': {4: None, 5: None},
'old_attributes': {4: '{}', 5: '{}'}, 'new_attributes': {4: None, 5: None},
'diff': {4: [('remove', 'id', 1)], 5: [('remove', 'id', 2)]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(2), correct_change_log[cols_to_compare],
check_dtype=False)
def test_removing_single_link():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
n.add_link('2', 2, 3, attribs={'a': 1})
n.add_link('3', 2, 3, attribs={'b': 4})
assert '1' in n.link_id_mapping
n.remove_link('1')
assert list(n.graph.nodes) == [1, 2, 3]
assert list(n.graph.edges) == [(1, 2, 0), (2, 3, 0), (2, 3, 1)]
assert '1' not in n.link_id_mapping
correct_change_log = pd.DataFrame(
{'timestamp': {4: '2020-06-12 19:58:01'}, 'change_event': {4: 'remove'}, 'object_type': {4: 'link'},
'old_id': {4: '1'}, 'new_id': {4: None}, 'old_attributes': {4: "{'b': 4, 'from': 1, 'to': 2, 'id': '1'}"},
'new_attributes': {4: None},
'diff': {4: [('remove', '', [('b', 4), ('from', 1), ('to', 2), ('id', '1')]), ('remove', 'id', '1')]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(1), correct_change_log[cols_to_compare],
check_dtype=False)
def test_removing_multiple_links():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'a': 1})
n.add_link('1', 1, 2, attribs={'b': 4})
n.add_link('2', 2, 3, attribs={'a': 1})
n.add_link('3', 2, 3, attribs={'b': 4})
assert '0' in n.link_id_mapping
assert '2' in n.link_id_mapping
n.remove_links(['0', '2'])
assert list(n.graph.nodes) == [1, 2, 3]
assert list(n.graph.edges) == [(1, 2, 1), (2, 3, 1)]
assert '0' not in n.link_id_mapping
assert '2' not in n.link_id_mapping
correct_change_log = pd.DataFrame(
{'timestamp': {4: '2020-06-12 19:55:10', 5: '2020-06-12 19:55:10'}, 'change_event': {4: 'remove', 5: 'remove'},
'object_type': {4: 'link', 5: 'link'}, 'old_id': {4: '0', 5: '2'}, 'new_id': {4: None, 5: None},
'old_attributes': {4: "{'a': 1, 'from': 1, 'to': 2, 'id': '0'}", 5: "{'a': 1, 'from': 2, 'to': 3, 'id': '2'}"},
'new_attributes': {4: None, 5: None},
'diff': {4: [('remove', '', [('a', 1), ('from', 1), ('to', 2), ('id', '0')]), ('remove', 'id', '0')],
5: [('remove', '', [('a', 1), ('from', 2), ('to', 3), ('id', '2')]), ('remove', 'id', '2')]}})
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(n.change_log[cols_to_compare].tail(2), correct_change_log[cols_to_compare],
check_dtype=False)
def test_number_of_multi_edges_counts_multi_edges_on_single_edge():
n = Network('epsg:27700')
n.graph.add_edges_from([(1, 2), (2, 3), (3, 4)])
assert n.number_of_multi_edges(1, 2) == 1
def test_number_of_multi_edges_counts_multi_edges_on_multi_edge():
n = Network('epsg:27700')
n.graph.add_edges_from([(1, 2), (1, 2), (3, 4)])
assert n.number_of_multi_edges(1, 2) == 2
def test_number_of_multi_edges_counts_multi_edges_on_non_existing_edge():
n = Network('epsg:27700')
n.graph.add_edges_from([(1, 2), (1, 2), (3, 4)])
assert n.number_of_multi_edges(1214, 21321) == 0
def test_nodes_gives_iterator_of_node_id_and_attribs():
n = Network('epsg:27700')
n.graph.add_edges_from([(1, 2), (2, 3), (3, 4)])
assert list(n.nodes()) == [(1, {}), (2, {}), (3, {}), (4, {})]
def test_node_gives_node_attribss():
n = Network('epsg:27700')
n.graph.add_node(1, **{'attrib': 1})
assert n.node(1) == {'attrib': 1}
def test_edges_gives_iterator_of_edge_from_to_nodes_and_attribs():
n = Network('epsg:27700')
n.graph.add_edges_from([(1, 2), (2, 3), (3, 4)])
assert list(n.edges()) == [(1, 2, {0: {}}), (2, 3, {0: {}}), (3, 4, {0: {}})]
def test_edge_method_gives_attributes_for_given_from_and_to_nodes():
n = Network('epsg:27700')
n.graph.add_edge(1, 2, **{'attrib': 1})
assert n.edge(1, 2) == {0: {'attrib': 1}}
def test_links_gives_iterator_of_link_id_and_edge_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'f': 's'})
n.add_link('1', 2, 3, attribs={'h': 1})
assert list(n.links()) == [('0', {'f': 's', 'from': 1, 'to': 2, 'id': '0'}),
('1', {'h': 1, 'from': 2, 'to': 3, 'id': '1'})]
def test_link_gives_link_attribs():
n = Network('epsg:27700')
n.add_link('0', 1, 2, attribs={'attrib': 1})
n.add_link('0', 1, 2, attribs={'attrib': 1})
assert n.link('0') == {'attrib': 1, 'from': 1, 'to': 2, 'id': '0'}
def test_schedule_routes(network_object_from_test_data):
n = network_object_from_test_data
correct_routes = [['25508485', '21667818']]
routes = n.schedule_routes_nodes()
assert correct_routes == routes
def test_schedule_routes_with_an_empty_service(network_object_from_test_data):
n = network_object_from_test_data
n.schedule._graph.graph['routes']['1'] = {
'route_short_name': '', 'mode': 'bus',
'trips': {},
'arrival_offsets': [], 'departure_offsets': [],
'route_long_name': '', 'id': '1', 'route': [],
'await_departure': [], 'ordered_stops': []}
n.schedule._graph.graph['service_to_route_map']['10314'].append('1')
n.schedule._graph.graph['route_to_service_map']['1'] = '10314'
assert set(n.schedule.service_ids()) == {'10314'}
correct_routes = [['25508485', '21667818']]
routes = n.schedule_routes_nodes()
assert correct_routes == routes
def test_schedule_routes_with_disconnected_routes(network_object_from_test_data):
n = network_object_from_test_data
n.add_link('2', 2345678, 987875)
n.schedule.apply_attributes_to_routes({'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98': {'route': ['1', '2']}})
correct_routes = [['25508485', '21667818'], [2345678, 987875]]
routes = n.schedule_routes_nodes()
assert correct_routes == routes
def test_reads_osm_network_into_the_right_schema(full_fat_default_config_path):
osm_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "osm", "osm.xml"))
network = read.read_osm(osm_test_file, full_fat_default_config_path, 1, 'epsg:27700')
assert_semantically_equal(dict(network.nodes()), {
'0': {'id': '0', 'x': 622502.8306679451, 'y': -5526117.781903352, 'lat': 0.008554364250688652,
'lon': -0.0006545205888310243, 's2_id': 1152921492875543713},
'1': {'id': '1', 'x': 622502.8132744529, 'y': -5524378.838447345, 'lat': 0.024278505899735615,
'lon': -0.0006545205888310243, 's2_id': 1152921335974974453},
'2': {'id': '2', 'x': 622502.8314014417, 'y': -5527856.725358106, 'lat': -0.00716977739835831,
'lon': -0.0006545205888310243, 's2_id': 384307157539499829}})
assert len(list(network.links())) == 11
number_of_0_multi_idx = 0
number_of_1_multi_idx = 0
number_of_2_multi_idx = 0
for link_id, edge_map in network.link_id_mapping.items():
if edge_map['multi_edge_idx'] == 0:
number_of_0_multi_idx += 1
elif edge_map['multi_edge_idx'] == 1:
number_of_1_multi_idx += 1
elif edge_map['multi_edge_idx'] == 2:
number_of_2_multi_idx += 1
assert number_of_0_multi_idx == 5
assert number_of_1_multi_idx == 4
assert number_of_2_multi_idx == 1
correct_link_attribs = [
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '0', 'to': '1', 's2_from': 1152921492875543713, 's2_to': 1152921335974974453,
'length': 1748.4487354464366,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '0'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '1', 'to': '0', 's2_from': 1152921335974974453, 's2_to': 1152921492875543713,
'length': 1748.4487354464366,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '0'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '0', 'to': '2', 's2_from': 1152921492875543713, 's2_to': 384307157539499829,
'length': 1748.4488584600201,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '100'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '2', 'to': '0', 's2_from': 384307157539499829, 's2_to': 1152921492875543713,
'length': 1748.4488584600201,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '100'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '1', 'to': '0', 's2_from': 1152921335974974453, 's2_to': 1152921492875543713,
'length': 1748.4487354464366,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '400'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '0', 'to': '1', 's2_from': 1152921492875543713, 's2_to': 1152921335974974453,
'length': 1748.4487354464366,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '400'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '2', 'to': '0', 's2_from': 384307157539499829, 's2_to': 1152921492875543713,
'length': 1748.4488584600201,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '700'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '0', 'to': '2', 's2_from': 1152921492875543713, 's2_to': 384307157539499829,
'length': 1748.4488584600201,
'attributes': {'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '700'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'}}},
{'permlanes': 3.0, 'freespeed': 12.5, 'capacity': 1800.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '2', 'to': '1', 's2_from': 384307157539499829, 's2_to': 1152921335974974453,
'length': 3496.897593906457,
'attributes': {'osm:way:lanes': {'name': 'osm:way:lanes', 'class': 'java.lang.String', 'text': '3'},
'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '47007861'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'tertiary'}}},
{'permlanes': 3.0, 'freespeed': 12.5, 'capacity': 1800.0, 'oneway': '1', 'modes': ['walk', 'car', 'bike'],
'from': '1', 'to': '0', 's2_from': 1152921335974974453, 's2_to': 1152921492875543713,
'length': 1748.4487354464366,
'attributes': {'osm:way:lanes': {'name': 'osm:way:lanes', 'class': 'java.lang.String', 'text': '3'},
'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String', 'text': '47007861'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'tertiary'}}},
{'permlanes': 1.0, 'freespeed': 12.5, 'capacity': 600.0, 'oneway': '1',
'modes': ['car', 'walk', 'bike'], 'from': '1', 'to': '0',
's2_from': 1152921335974974453, 's2_to': 1152921492875543713,
'length': 1748.4487354464366, 'attributes': {
'osm:way:osmid': {'name': 'osm:way:osmid', 'class': 'java.lang.String',
'text': '47007862'},
'osm:way:lanes': {'name': 'osm:way:lanes', 'class': 'java.lang.String',
'text': '3;2'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'tertiary'}}}
]
cols = ['permlanes', 'freespeed', 'capacity', 'oneway', 'modes', 'from', 'to', 's2_from', 's2_to', 'length',
'attributes']
assert len(network.link_id_mapping) == 11
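# each parsed link (with its generated 'id' removed) must match at least one of the expected
# attribute dicts above, since link ordering from the OSM reader is not guaranteed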
for link in network.link_id_mapping.keys():
satisfied = False
attribs_to_test = network.link(link).copy()
del attribs_to_test['id']
for link_attrib in correct_link_attribs:
try:
assert_semantically_equal(attribs_to_test, link_attrib)
satisfied = True
except AssertionError:
pass
assert satisfied
def test_read_matsim_network_with_duplicated_node_ids_records_removal_in_changelog(mocker):
dup_nodes = {'21667818': [
{'id': '21667818', 'x': 528504.1342843144, 'y': 182155.7435136598, 'lon': -0.14910908709500162,
'lat': 51.52370573323939, 's2_id': 5221390302696205321}]}
mocker.patch.object(matsim_reader, 'read_network', return_value=(nx.MultiDiGraph(), 2, dup_nodes, {}))
network = read.read_matsim(path_to_network=pt2matsim_network_test_file, epsg='epsg:27700')
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-07-02 11:36:54'}, 'change_event': {0: 'remove'}, 'object_type': {0: 'node'},
'old_id': {0: '21667818'}, 'new_id': {0: None},
'old_attributes': {
0: "{'id': '21667818', 'x': 528504.1342843144, 'y': 182155.7435136598, 'lon': -0.14910908709500162, 'lat': 51.52370573323939, 's2_id': 5221390302696205321}"},
'new_attributes': {0: None},
'diff': {0: [('remove', '', [('id', '21667818'), ('x', 528504.1342843144),
('y', 182155.7435136598),
('lon', -0.14910908709500162),
('lat', 51.52370573323939),
('s2_id', 5221390302696205321)]),
('remove', 'id', '21667818')]}}
)
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network.change_log[cols_to_compare].tail(1), correct_change_log_df[cols_to_compare],
check_names=False,
check_dtype=False)
def test_read_matsim_network_with_duplicated_link_ids_records_reindexing_in_changelog(mocker):
dup_links = {'1': ['1_1']}
correct_link_id_map = {'1': {'from': '25508485', 'to': '21667818', 'multi_edge_idx': 0},
'1_1': {'from': '25508485', 'to': '21667818', 'multi_edge_idx': 1}}
mocker.patch.object(matsim_reader, 'read_network',
return_value=(nx.MultiDiGraph(), correct_link_id_map, {}, dup_links))
mocker.patch.object(Network, 'link', return_value={'heyooo': '1'})
network = read.read_matsim(path_to_network=pt2matsim_network_test_file, epsg='epsg:27700')
correct_change_log_df = pd.DataFrame(
{'timestamp': {0: '2020-07-02 11:59:00'}, 'change_event': {0: 'modify'}, 'object_type': {0: 'link'},
'old_id': {0: '1'}, 'new_id': {0: '1_1'}, 'old_attributes': {0: "{'heyooo': '1'}"},
'new_attributes': {0: "{'heyooo': '1'}"}, 'diff': {0: [('change', 'id', ('1', '1_1'))]}}
)
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'old_attributes', 'new_attributes', 'diff']
assert_frame_equal(network.change_log[cols_to_compare].tail(1), correct_change_log_df[cols_to_compare],
check_names=False,
check_dtype=False)
def test_has_node_when_node_is_in_the_graph():
n = Network('epsg:27700')
n.add_node('1')
assert n.has_node('1')
def test_has_node_when_node_is_not_in_the_graph():
n = Network('epsg:27700')
assert not n.has_node('1')
def test_has_nodes_when_nodes_in_the_graph():
n = Network('epsg:27700')
n.add_node('1')
n.add_node('2')
n.add_node('3')
assert n.has_nodes(['1', '2'])
def test_has_nodes_when_only_some_nodes_in_the_graph():
n = Network('epsg:27700')
n.add_node('1')
n.add_node('2')
n.add_node('3')
assert not n.has_nodes(['1', '4'])
def test_has_nodes_when_none_of_the_nodes_in_the_graph():
n = Network('epsg:27700')
n.add_node('1')
n.add_node('2')
n.add_node('3')
assert not n.has_nodes(['10', '20'])
def test_has_edge_when_edge_is_in_the_graph():
n = Network('epsg:27700')
n.add_link('1', 1, 2)
assert n.has_edge(1, 2)
def test_has_edge_when_edge_is_not_in_the_graph():
n = Network('epsg:27700')
assert not n.has_edge(1, 2)
def test_has_link_when_link_is_in_the_graph():
n = Network('epsg:27700')
n.add_link('1', 1, 2)
assert n.has_link('1')
def test_has_link_when_link_is_not_in_the_graph():
n = Network('epsg:27700')
assert not n.has_link('1')
def test_has_link_when_link_id_is_in_the_network_but_corresponding_edge_is_not():
# unlikely scenario, but possible if someone manipulates the graph with a non-genet method
n = Network('epsg:27700')
n.link_id_mapping['1'] = {'from': 1, 'to': 2, 'multi_edge_idx': 0}
assert not n.has_link('1')
def test_has_links_when_links_in_the_graph():
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 1, 2)
n.add_link('3', 1, 2)
assert n.has_links(['1', '2'])
def test_has_links_when_only_some_links_in_the_graph():
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 1, 2)
n.add_link('3', 1, 2)
assert not n.has_links(['1', '4'])
def test_has_links_when_none_of_the_links_in_the_graph():
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 1, 2)
n.add_link('3', 1, 2)
assert not n.has_links(['10', '20'])
def test_has_links_with_passing_attribute_condition():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': 'car'})
n.add_link('2', 1, 2, attribs={'modes': 'car'})
assert n.has_links(['1', '2'], conditions={'modes': 'car'})
def test_has_links_with_failing_attribute_condition():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': 'bus'})
n.add_link('2', 1, 2, attribs={'modes': 'walk'})
assert not n.has_links(['1', '2'], conditions={'modes': 'car'})
def test_has_links_not_in_graph_with_attribute_condition():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': 'car'})
n.add_link('2', 1, 2, attribs={'modes': 'car'})
assert not n.has_links(['10', '20'], conditions={'modes': 'car'})
def test_has_valid_link_chain_with_a_valid_link_chain():
n = Network('epsg:27700')
n.add_link('1', 1, 3)
n.add_link('2', 3, 4)
assert n.has_valid_link_chain(['1', '2'])
def test_has_valid_link_chain_with_an_invalid_link_chain():
n = Network('epsg:27700')
n.add_link('1', 1, 3)
n.add_link('2', 2, 4)
assert not n.has_valid_link_chain(['1', '2'])
def test_has_valid_link_chain_with_an_empty_link_chain():
n = Network('epsg:27700')
n.add_link('1', 1, 3)
n.add_link('2', 2, 4)
assert not n.has_valid_link_chain([])
def test_calculate_route_distance_with_links_that_have_length_attrib():
n = Network('epsg:27700')
n.add_link('1', 1, 3, attribs={'length': 2})
n.add_link('2', 3, 4, attribs={'length': 1})
assert n.route_distance(['1', '2']) == 3
def test_calculate_route_distance_with_links_that_dont_have_length_attrib():
n = Network('epsg:27700')
n.add_node(1, attribs={'s2_id': 12345})
n.add_node(3, attribs={'s2_id': 345435})
n.add_node(4, attribs={'s2_id': 568767})
n.add_link('1', 1, 3)
n.add_link('2', 3, 4)
assert round(n.route_distance(['1', '2']), 6) == 0.013918
def test_calculate_route_distance_returns_0_when_route_is_invalid():
n = Network('epsg:27700')
n.add_link('1', 1, 3)
n.add_link('2', 5, 4)
assert n.route_distance(['1', '2']) == 0
def test_valid_network_route():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': ['car', 'bus']})
n.add_link('2', 2, 3, attribs={'modes': ['car', 'bus']})
r = Route(route_short_name='', mode='bus', stops=[], trips={}, arrival_offsets=[], departure_offsets=[],
route=['1', '2'])
assert n.is_valid_network_route(r)
def test_network_route_with_wrong_links():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': ['car', 'bus']})
n.add_link('2', 3, 2, attribs={'modes': ['car', 'bus']})
r = Route(route_short_name='', mode='bus', stops=[], trips={}, arrival_offsets=[], departure_offsets=[],
route=['1', '2'])
assert not n.is_valid_network_route(r)
def test_network_route_with_empty_link_list():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': ['car', 'bus']})
n.add_link('2', 3, 2, attribs={'modes': ['car', 'bus']})
r = Route(route_short_name='', mode='bus', stops=[], trips={}, arrival_offsets=[], departure_offsets=[],
route=[])
assert not n.is_valid_network_route(r)
def test_network_route_with_incorrect_modes_on_link():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'modes': ['car']})
n.add_link('2', 3, 2, attribs={'modes': ['car', 'bus']})
r = Route(route_short_name='', mode='bus', stops=[], trips={}, arrival_offsets=[], departure_offsets=[],
route=['1', '2'])
assert not n.is_valid_network_route(r)
def test_generate_index_for_node_gives_next_integer_string_when_you_have_matsim_usual_integer_index():
n = Network('epsg:27700')
n.add_node('1')
assert n.generate_index_for_node() == '2'
def test_generate_index_for_node_gives_string_based_on_length_node_ids_when_you_have_mixed_index():
n = Network('epsg:27700')
n.add_node('1')
n.add_node('1x')
assert n.generate_index_for_node() == '3'
def test_generate_index_for_node_gives_string_based_on_length_node_ids_when_you_have_all_non_int_index():
n = Network('epsg:27700')
n.add_node('1w')
n.add_node('1x')
assert n.generate_index_for_node() == '3'
def test_generate_index_for_node_gives_uuid4_as_last_resort(mocker):
mocker.patch.object(uuid, 'uuid4')
n = Network('epsg:27700')
n.add_node('1w')
n.add_node('1x')
n.add_node('4')
n.generate_index_for_node()
uuid.uuid4.assert_called_once()
def test_generating_n_indicies_for_nodes():
n = Network('epsg:27700')
n.add_nodes({str(i): {} for i in range(10)})
idxs = n.generate_indices_for_n_nodes(5)
assert len(idxs) == 5
assert not set(dict(n.nodes()).keys()) & idxs
def test_generate_index_for_edge_gives_next_integer_string_when_you_have_matsim_usual_integer_index():
n = Network('epsg:27700')
n.link_id_mapping = {'1': {}, '2': {}}
new_idx = n.generate_index_for_edge()
assert isinstance(new_idx, str)
assert new_idx not in ['1', '2']
def test_generate_index_for_edge_gives_string_based_on_length_link_id_mapping_when_you_have_mixed_index():
n = Network('epsg:27700')
n.link_id_mapping = {'1': {}, 'x2': {}}
new_idx = n.generate_index_for_edge()
assert isinstance(new_idx, str)
assert new_idx not in ['1', 'x2']
def test_generate_index_for_edge_gives_string_based_on_length_link_id_mapping_when_you_have_all_non_int_index():
n = Network('epsg:27700')
n.link_id_mapping = {'1x': {}, 'x2': {}}
new_idx = n.generate_index_for_edge()
assert isinstance(new_idx, str)
assert new_idx not in ['1x', 'x2']
def test_index_graph_edges_generates_completely_new_index():
n = Network('epsg:27700')
n.add_link('1x', 1, 2)
n.add_link('x2', 1, 2)
n.index_graph_edges()
assert list(n.link_id_mapping.keys()) == ['0', '1']
def test_generating_n_indicies_for_edges():
n = Network('epsg:27700')
n.add_links({str(i): {'from': 0, 'to': 1} for i in range(11)})
idxs = n.generate_indices_for_n_edges(7)
assert len(idxs) == 7
for i in idxs:
assert isinstance(i, str)
assert not set(n.link_id_mapping.keys()) & idxs
def test_has_schedule_with_valid_network_routes_with_valid_routes(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={"modes": ['bus']})
n.add_link('2', 2, 3, attribs={"modes": ['car', 'bus']})
route.route = ['1', '2']
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route])])
route.reindex('service_1')
n.schedule.add_route('service', route)
n.schedule.apply_attributes_to_routes({'service_0': {'route': ['1', '2']}, 'service_1': {'route': ['1', '2']}})
assert n.has_schedule_with_valid_network_routes()
def test_has_schedule_with_valid_network_routes_with_some_valid_routes(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 2, 3)
route.route = ['1', '2']
route_2 = Route(route_short_name='', mode='bus', stops=[],
trips={'trip_id': ['1'], 'trip_departure_time': ['13:00:00'], 'vehicle_id': ['veh_1_bus']},
arrival_offsets=[], departure_offsets=[], route=['10000'])
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route, route_2])])
assert not n.has_schedule_with_valid_network_routes()
def test_has_schedule_with_valid_network_routes_with_invalid_routes(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 2, 3)
route.route = ['3', '4']
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route, route])])
assert not n.has_schedule_with_valid_network_routes()
def test_has_schedule_with_valid_network_routes_with_empty_routes(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 2, 3)
route.route = []
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route, route])])
assert not n.has_schedule_with_valid_network_routes()
def test_invalid_network_routes_with_valid_route(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={"modes": ['car', 'bus']})
n.add_link('2', 2, 3, attribs={"modes": ['bus']})
route.reindex('route')
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route])])
n.schedule.apply_attributes_to_routes({'route': {'route': ['1', '2']}})
assert n.invalid_network_routes() == []
def test_invalid_network_routes_with_invalid_route(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 2, 3)
route.reindex('route')
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route])])
n.schedule.apply_attributes_to_routes({'route': {'route': ['3', '4']}})
assert n.invalid_network_routes() == ['route']
def test_invalid_network_routes_with_empty_route(route):
n = Network('epsg:27700')
n.add_link('1', 1, 2)
n.add_link('2', 2, 3)
route.reindex('route')
n.schedule = Schedule(n.epsg, [Service(id='service', routes=[route])])
n.schedule.apply_attributes_to_routes({'route': {'route': []}})
assert n.invalid_network_routes() == ['route']
def test_generate_validation_report_with_pt2matsim_network(network_object_from_test_data):
n = network_object_from_test_data
report = n.generate_validation_report()
correct_report = {
'graph': {
'graph_connectivity': {
'car': {'problem_nodes': {'dead_ends': ['21667818'], 'unreachable_node': ['25508485']},
'number_of_connected_subgraphs': 2},
'walk': {'problem_nodes': {'dead_ends': ['21667818'], 'unreachable_node': ['25508485']},
'number_of_connected_subgraphs': 2},
'bike': {'problem_nodes': {'dead_ends': [], 'unreachable_node': []},
'number_of_connected_subgraphs': 0}},
'link_attributes': {
'links_over_1km_length': {'number_of': 0, 'percentage': 0.0, 'link_ids': []},
'zero_attributes': {}}},
'schedule': {
'schedule_level': {'is_valid_schedule': False, 'invalid_stages': ['not_has_valid_services'],
'has_valid_services': False, 'invalid_services': ['10314']},
'service_level': {
'10314': {'is_valid_service': False, 'invalid_stages': ['not_has_valid_routes'],
'has_valid_routes': False, 'invalid_routes': ['<KEY>']}},
'route_level': {'10314': {'<KEY>': {'is_valid_route': False,
'invalid_stages': [
'not_has_correctly_ordered_route']}}}},
'routing': {'services_have_routes_in_the_graph': False,
'service_routes_with_invalid_network_route': ['<KEY>'],
'route_to_crow_fly_ratio': {
'10314': {'<KEY>': 'Division by zero'}}}}
assert_semantically_equal(report, correct_report)
def test_generate_validation_report_with_correct_schedule(correct_schedule):
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'length': 2, "modes": ['car', 'bus']})
n.add_link('2', 2, 3, attribs={'length': 2, "modes": ['car', 'bus']})
n.schedule = correct_schedule
report = n.generate_validation_report()
correct_report = {
'graph': {
'graph_connectivity': {'car': {'problem_nodes': {'dead_ends': [3], 'unreachable_node': [1]},
'number_of_connected_subgraphs': 3},
'walk': {'problem_nodes': {'dead_ends': [], 'unreachable_node': []},
'number_of_connected_subgraphs': 0},
'bike': {'problem_nodes': {'dead_ends': [], 'unreachable_node': []},
'number_of_connected_subgraphs': 0}},
'link_attributes': {'links_over_1km_length': {'number_of': 0, 'percentage': 0.0, 'link_ids': []},
'zero_attributes': {}}},
'schedule': {'schedule_level': {'is_valid_schedule': True, 'invalid_stages': [], 'has_valid_services': True,
'invalid_services': []}, 'service_level': {
'service': {'is_valid_service': True, 'invalid_stages': [], 'has_valid_routes': True,
'invalid_routes': []}}, 'route_level': {
'service': {'1': {'is_valid_route': True, 'invalid_stages': []},
'2': {'is_valid_route': True, 'invalid_stages': []}}}},
'routing': {'services_have_routes_in_the_graph': True, 'service_routes_with_invalid_network_route': [],
'route_to_crow_fly_ratio': {'service': {'1': 0.037918141839160244, '2': 0.037918141839160244}}}}
assert_semantically_equal(report, correct_report)
def test_zero_value_attributes_show_up_in_validation_report():
n = Network('epsg:27700')
n.add_link('1', 1, 2, attribs={'length': 0, 'capacity': 0.0, 'freespeed': '0.0', "modes": ['car', 'bus']})
n.add_link('2', 2, 3, attribs={'length': 2, 'capacity': 1, 'freespeed': 2, "modes": ['car', 'bus']})
report = n.generate_validation_report()
correct_report = {'graph': {
'graph_connectivity': {
'car': {'problem_nodes': {'dead_ends': [3], 'unreachable_node': [1]}, 'number_of_connected_subgraphs': 3},
'walk': {'problem_nodes': {'dead_ends': [], 'unreachable_node': []}, 'number_of_connected_subgraphs': 0},
'bike': {'problem_nodes': {'dead_ends': [], 'unreachable_node': []}, 'number_of_connected_subgraphs': 0}},
'link_attributes': {
'links_over_1km_length': {'number_of': 0, 'percentage': 0.0, 'link_ids': []},
'zero_attributes': {
'length': {'number_of': 1, 'percentage': 0.5, 'link_ids': ['1']},
'capacity': {'number_of': 1, 'percentage': 0.5, 'link_ids': ['1']},
'freespeed': {'number_of': 1, 'percentage': 0.5, 'link_ids': ['1']}}}}}
assert_semantically_equal(report, correct_report)
def test_write_to_matsim_generates_three_matsim_files(network_object_from_test_data, tmpdir):
# the correctness of these files is tested elsewhere
expected_network_xml = os.path.join(tmpdir, 'network.xml')
assert not os.path.exists(expected_network_xml)
expected_schedule_xml = os.path.join(tmpdir, 'schedule.xml')
assert not os.path.exists(expected_schedule_xml)
expected_vehicle_xml = os.path.join(tmpdir, 'vehicles.xml')
assert not os.path.exists(expected_vehicle_xml)
network_object_from_test_data.write_to_matsim(tmpdir)
assert os.path.exists(expected_network_xml)
assert os.path.exists(expected_schedule_xml)
assert os.path.exists(expected_vehicle_xml)
def test_write_to_matsim_generates_network_matsim_file_if_network_is_car_only(network_object_from_test_data, tmpdir):
# the correctness of these files is tested elsewhere
expected_network_xml = os.path.join(tmpdir, 'network.xml')
assert not os.path.exists(expected_network_xml)
expected_schedule_xml = os.path.join(tmpdir, 'schedule.xml')
assert not os.path.exists(expected_schedule_xml)
expected_vehicle_xml = os.path.join(tmpdir, 'vehicles.xml')
assert not os.path.exists(expected_vehicle_xml)
n = network_object_from_test_data
n.schedule = Schedule('epsg:27700')
assert not n.schedule
n.write_to_matsim(tmpdir)
assert os.path.exists(expected_network_xml)
assert not os.path.exists(expected_schedule_xml)
assert not os.path.exists(expected_vehicle_xml)
def test_write_to_matsim_generates_change_log_csv(network_object_from_test_data, tmpdir):
expected_change_log_path = os.path.join(tmpdir, 'network_change_log.csv')
expected_schedule_change_log_path = os.path.join(tmpdir, 'schedule_change_log.csv')
assert not os.path.exists(expected_change_log_path)
network_object_from_test_data.write_to_matsim(tmpdir)
assert os.path.exists(expected_change_log_path)
assert os.path.exists(expected_schedule_change_log_path)
benchmark_path_json = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "auxiliary_files", "links_benchmark.json"))
benchmark_path_csv = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "auxiliary_files", "links_benchmark.csv"))
@pytest.fixture()
def aux_network():
n = Network('epsg:27700')
n.add_nodes({'1': {'x': 1, 'y': 2, 's2_id': 0}, '2': {'x': 1, 'y': 2, 's2_id': 0},
'3': {'x': 1, 'y': 2, 's2_id': 0}, '4': {'x': 1, 'y': 2, 's2_id': 0}})
n.add_links(
{'1': {'from': '1', 'to': '2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1, 'modes': {'car'}},
'2': {'from': '1', 'to': '3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1, 'modes': {'car'}},
'3': {'from': '2', 'to': '4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1, 'modes': {'car'}},
'4': {'from': '3', 'to': '4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1, 'modes': {'car'}}})
n.read_auxiliary_link_file(benchmark_path_json)
n.read_auxiliary_node_file(benchmark_path_csv)
return n
def test_reindexing_network_node_with_auxiliary_files(aux_network):
aux_network.reindex_node('3', '0')
assert aux_network.auxiliary_files['node']['links_benchmark.csv'].map == {'2': '2', '3': '0', '4': '4', '1': '1'}
assert aux_network.auxiliary_files['link']['links_benchmark.json'].map == {'2': '2', '1': '1', '3': '3', '4': '4'}
def test_reindexing_network_link_with_auxiliary_files(aux_network):
aux_network.reindex_link('2', '0')
assert aux_network.auxiliary_files['node']['links_benchmark.csv'].map == {'2': '2', '3': '3', '4': '4', '1': '1'}
assert aux_network.auxiliary_files['link']['links_benchmark.json'].map == {'2': '0', '1': '1', '3': '3', '4': '4'}
def test_removing_network_node_with_auxiliary_files(aux_network):
aux_network.remove_nodes(['1', '2'])
aux_network.remove_node('3')
assert aux_network.auxiliary_files['node']['links_benchmark.csv'].map == {'2': None, '3': None, '4': '4', '1': None}
assert aux_network.auxiliary_files['link']['links_benchmark.json'].map == {'2': '2', '1': '1', '3': '3', '4': '4'}
def test_removing_network_link_with_auxiliary_files(aux_network):
aux_network.remove_links(['1', '2'])
aux_network.remove_link('3')
assert aux_network.auxiliary_files['node']['links_benchmark.csv'].map == {'2': '2', '3': '3', '4': '4', '1': '1'}
assert aux_network.auxiliary_files['link']['links_benchmark.json'].map == {'2': None, '1': None, '3': None,
'4': '4'}
def test_simplifying_network_with_auxiliary_files(aux_network):
aux_network.simplify()
assert aux_network.auxiliary_files['node']['links_benchmark.csv'].map == {'1': '1', '2': None, '3': None, '4': '4'}
assert aux_network.auxiliary_files['link']['links_benchmark.json'].map == {
'2': aux_network.link_simplification_map['2'],
'1': aux_network.link_simplification_map['1'],
'3': aux_network.link_simplification_map['3'],
'4': aux_network.link_simplification_map['4']}
def test_saving_network_with_auxiliary_files_with_changes(aux_network, tmpdir):
aux_network.auxiliary_files['node']['links_benchmark.csv'].map = {'2': None, '3': None, '4': '04', '1': None}
aux_network.auxiliary_files['link']['links_benchmark.json'].map = {'2': '002', '1': '001', '3': '003', '4': '004'}
expected_json_aux_file = os.path.join(tmpdir, 'auxiliary_files', 'links_benchmark.json')
expected_csv_aux_file = os.path.join(tmpdir, 'auxiliary_files', 'links_benchmark.csv')
assert not os.path.exists(expected_json_aux_file)
assert not os.path.exists(expected_csv_aux_file)
aux_network.write_to_matsim(tmpdir)
assert os.path.exists(expected_json_aux_file)
assert os.path.exists(expected_csv_aux_file)
with open(expected_json_aux_file) as json_file:
assert json.load(json_file)['car']['2']['in']['links'] == ['002']
assert pd.read_csv(expected_csv_aux_file)['links'].to_dict() == {0: '[None]', 1: '[None]', 2: '[None]', 3: "['04']"}
@pytest.fixture()
def network_1_geo_and_json(network1):
nodes = gpd.GeoDataFrame({
'101982': {'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879,
'geometry': Point(528704.1425925883, 182068.78193707118)},
'101986': {'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387,
'geometry': Point(528835.203274008, 182006.27331298392)}}).T
nodes.index = nodes.index.set_names(['index'])
links = gpd.GeoDataFrame({
'0': {'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0,
'permlanes': 1.0, 'oneway': '1', 'modes': ['car', 'bike'], 's2_from': 5221390329378179879,
's2_to': 5221390328605860387, 'length': 52.765151087870265,
'geometry': LineString(
[(528704.1425925883, 182068.78193707118), (528835.203274008, 182006.27331298392)]),
'u': '101982', 'v': '101986',
'attributes': {
'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String',
'text': 'Brunswick Place'}}}}).T
links.index = links.index.set_names(['index'])
# most networks are expected to have complex geometries
network1.apply_attributes_to_links(
{'0': {
'modes': ['car', 'bike'],
'geometry': LineString([(528704.1425925883, 182068.78193707118), (528835.203274008, 182006.27331298392)])}})
return {
'network': network1,
'expected_json': {'nodes': {
'101982': {'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879,
'geometry': [528704.1425925883, 182068.78193707118]},
'101986': {'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387,
'geometry': [528835.203274008, 182006.27331298392]}},
'links': {
'0': {'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0,
'permlanes': 1.0, 'oneway': '1', 'modes': ['car', 'bike'], 's2_from': 5221390329378179879,
's2_to': 5221390328605860387, 'length': 52.765151087870265,
'geometry': 'ez~hinaBc~sze|`@gx|~W|uo|J', 'u': '101982', 'v': '101986',
'attributes': {
'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String', 'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String',
'text': 'Brunswick Place'}}}}},
'expected_sanitised_json': {'nodes': {
'101982': {'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879,
'geometry': '528704.1425925883,182068.78193707118'},
'101986': {'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387,
'geometry': '528835.203274008,182006.27331298392'}},
'links': {
'0': {'id': '0', 'from': '101982', 'to': '101986', 'freespeed': 4.166666666666667, 'capacity': 600.0,
'permlanes': 1.0, 'oneway': '1', 'modes': 'car,bike', 's2_from': 5221390329378179879,
's2_to': 5221390328605860387, 'length': 52.765151087870265,
'geometry': 'ez~hinaBc~sze|`@gx|~W|uo|J', 'u': '101982', 'v': '101986',
'attributes': {
'osm:way:access': {'name': 'osm:way:access', 'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway', 'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id', 'class': 'java.lang.Long', 'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name', 'class': 'java.lang.String',
'text': 'Brunswick Place'}}}}},
'expected_geodataframe': {'nodes': nodes, 'links': links}
}
def test_transforming_network_to_json(network_1_geo_and_json):
assert_semantically_equal(network_1_geo_and_json['network'].to_json(), network_1_geo_and_json['expected_json'])
def test_saving_network_to_json(network_1_geo_and_json, tmpdir):
network_1_geo_and_json['network'].write_to_json(tmpdir)
expected_network_json = os.path.join(tmpdir, 'network.json')
assert os.path.exists(expected_network_json)
with open(expected_network_json) as json_file:
output_json = json.load(json_file)
assert_semantically_equal(
output_json,
network_1_geo_and_json['expected_sanitised_json'])
def test_transforming_network_to_geodataframe(network_1_geo_and_json):
node_cols = ['id', 'x', 'y', 'lon', 'lat', 's2_id', 'geometry']
link_cols = ['id', 'from', 'to', 'freespeed', 'capacity', 'permlanes', 'oneway', 'modes', 's2_from', 's2_to',
'length', 'geometry', 'attributes', 'u', 'v']
_network = network_1_geo_and_json['network'].to_geodataframe()
assert set(_network['nodes'].columns) == set(node_cols)
assert_frame_equal(_network['nodes'][node_cols], network_1_geo_and_json['expected_geodataframe']['nodes'][node_cols], check_dtype=False)
assert set(_network['links'].columns) == set(link_cols)
assert_frame_equal(_network['links'][link_cols], network_1_geo_and_json['expected_geodataframe']['links'][link_cols], check_dtype=False)
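# ===== new source file: tweet hashtag/mention topic modelling (scikit-learn TF-IDF / LDA) =====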
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.model_selection import GridSearchCV
PATH_HASHTAGS = 'tweet_tokens/training/hashtags/hashtags.csv'
PATH_MENTIONS = 'tweet_tokens/training/mentions/mentions.csv'
df_hashtags = pd.read_csv(PATH_HASHTAGS, header=0, delimiter='\x01')
df_mentions = pd.read_csv(PATH_MENTIONS, header=0, delimiter='\x01')
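# ===== new source file: barcode error analysis of mapped sequencing reads (Biopython / pandas) =====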
from Bio import SeqIO
import pandas as pd
import numpy as np
import subprocess
import os
import re
import time
import random
import itertools
import gzip
import json
import platform
import ast
import multiprocessing as mp
from multiprocessing import Manager
from os.path import expanduser
from importlib.machinery import SourceFileLoader
from scipy.stats import binom
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
def barcode_errors(sorted_sampid):
base_map = {"A":1, "T":2, "G":3, "C":4, "-":0}
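# map nucleotide characters to integer codes; '-' (gap) is encoded as 0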
for sampid in sorted_sampid:
# get mapped reads with true aln pos
mapped_reads_df = pd.read_csv("./results/barcode_sort/mapped_%s_true_nucpos.csv"%(sampid))
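# ===== new source file: Bitcoin market-price prediction with a Keras LSTM =====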
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras import metrics, losses
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.preprocessing import MinMaxScaler
chart_names = ["total-bitcoins", "market-price", "market-cap", "trade-volume", "blocks-size", "avg-block-size",
"n-transactions-per-block", "median-confirmation-time", "hash-rate",
"difficulty", "miners-revenue", "transaction-fees", "transaction-fees-usd",
"cost-per-transaction-percent", "cost-per-transaction", "n-unique-addresses", "n-transactions",
"n-transactions-total", "transactions-per-second", "mempool-size", "mempool-growth", "mempool-count",
"utxo-count", "n-transactions-excluding-popular",
"n-transactions-excluding-chains-longer-than-100", "output-volume", "estimated-transaction-volume-usd",
"estimated-transaction-volume", "my-wallet-n-users"]
chart_names2 = ["market-price"]
my_metrics = [metrics.binary_accuracy,
metrics.mean_absolute_error,
metrics.sparse_categorical_accuracy,
losses.mean_absolute_percentage_error,
losses.squared_hinge,
losses.hinge,
losses.poisson]
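# additional metrics reported alongside the MSE loss during model.fit / model.evaluate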
def plot(x_list, y_list, y_list2, chart_name):
plt.plot(x_list, y_list, label='line1')
plt.plot(x_list, y_list2, label='line2')
plt.title(chart_name)
plt.legend(['predicted', 'real'], loc='upper left')
plt.show()
def create_model():
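# single LSTM layer (64 units), dropout, and a dense regression head; compiled with Adagrad and MSE loss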
model = Sequential()
model.add(LSTM(units=64, input_shape=(x_train.shape[1], x_train.shape[2])))
model.add(Dropout(rate=0.3))
model.add(Dense(1))
model.compile(optimizer='adagrad', loss='mse',
metrics=my_metrics)
return model
def train_model(model):
story = model.fit(x_train, y_train, epochs=20, batch_size=128, callbacks=[], validation_data=(x_val, y_val))
plt.plot(story.history['loss'])
plt.plot(story.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Val'], loc='upper left')
plt.show()
def predict(xs, ys, name):
predicted = model.predict(xs)
if normalization:
predicted = test_scaller2.inverse_transform(predicted)
score = model.evaluate(x=xs, y=ys, verbose=2)
# temporarily prepend the default loss so metric names line up with the scores returned by evaluate()
my_metrics.insert(0, losses.mean_squared_error)
print(f'--------- {name} -----------')
[print(f'{my_metrics[index].__name__}: {item}') for index, item in enumerate(score)]
my_metrics.pop(0)  # remove the prepended loss again; a bare pop() would drop the last metric instead
x_series = list(range(0, predicted.shape[0]))
x_series = np.reshape(x_series, (x_series.__len__(), 1))
if normalization:
plot(x_series, predicted, test_scaller2.inverse_transform(ys.reshape(-1, 1)), name)
else:
plot(x_series, predicted, ys.reshape(-1, 1), name)
if __name__ == '__main__':
normalization = True
data_2018 = pd.read_csv('bitcoin_market_data.csv', sep=',')
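# ===== new source file: pytest suite for split_schedule's ScheduleBuilder =====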
import logging
import pandas as pd
import pytest
from split_schedule.errors import NoScheduleError
from split_schedule.schedule_builder import ScheduleBuilder, SchedulingError
from tests.helpers import init_classes_check, reduce_classes_check, total_classes_check
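# the first group of tests monkeypatches ScheduleBuilder internals to force each validation-failure
# path, asserting that SchedulingError is raised and that log messages appear only when verbose=True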
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_classs_size(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["<NAME>"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
def mock_return(*args, **kwargs):
return pd.DataFrame(
{
"student": ["<NAME>"],
"original": 3,
"scheduled": 2,
}
).set_index("student")
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_class_size", mock_return)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Classes contain too many students" in caplog.text
else:
assert "Classes contain too many students" not in caplog.text
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_classes_number(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["test 1"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [1, 2, 1, 2],
"class": ["test class 1", "test class 2", "test class 1", "test class 2"],
"student": ["test 1", "test 1", "test 2", "test 2"],
}
def mock_return_validated_classes(*args, **kwargs):
return pd.DataFrame(data_2)
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_classes", mock_return_validated_classes)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Student missing" in caplog.text
else:
assert "Student missing" not in caplog.text
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_same_day(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["test 1"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [
1,
1,
2,
],
"class": [
"test class 1",
"test class 1",
"test class 3",
],
"total_students": [
2,
2,
1,
],
"max_students": [
2,
2,
1,
],
"num_classes": [
1,
1,
1,
],
"day_number": [
1,
1,
2,
],
"student": [
"test 1",
"test 2",
"test 1",
],
}
def mock_return_validated_days(*args, **kwargs):
return pd.DataFrame(data_2)
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_same_day", mock_return_validated_days)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Student not on the same day" in caplog.text
else:
assert "Student not on the same day" not in caplog.text
@pytest.mark.parametrize("max_tries", [1, 2])
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_validated_students(monkeypatch, tmp_path, caplog, max_tries, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["<NAME>"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
def mock_return_validated_students(*args, **kwargs):
return ["test 1"]
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_validate_students", mock_return_validated_students)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(
test_file, 0.2, max_tries=max_tries, verbose=verbose
)
if verbose:
assert "Student original number" in caplog.text
else:
assert "Student original number" not in caplog.text
@pytest.mark.parametrize("verbose", [True, False])
def test_build_schedule_restart(monkeypatch, tmp_path, caplog, verbose):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1],
"class": ["test class 1"],
"student": ["test 1"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
def mock_return(*args, **kwargs):
return None
schedule_builder = ScheduleBuilder()
monkeypatch.setattr(ScheduleBuilder, "_fill_classes", mock_return)
with pytest.raises(SchedulingError):
schedule_builder.build_schedule_from_file(test_file, 0.2, max_tries=2, verbose=verbose)
if verbose:
assert "No schedule found. Retrying" in caplog.text
else:
assert "No schedule found. Retrying" not in caplog.text
def test_fill_classes_match_no_space(tmp_path):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 1", "test 2", "test 1", "test 2"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
fill_classes = [
{
"block": 1,
"class_name": "test class 1",
"total_students": 2,
"max_students": 1,
"num_classes": 1,
"classes": [set()],
},
{
"block": 2,
"class_name": "test class 2",
"total_students": 2,
"max_students": 1,
"num_classes": 1,
"classes": [set()],
},
]
student_classes_grouped = {
"test 1": {"blocks": {1: "test class 1", 2: "test class 2"}},
"test 2": {"blocks": {1: "test class 1", 2: "test class 2"}},
}
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
fill_classes = schedule_builder._fill_classes(fill_classes, student_classes_grouped)
assert not fill_classes
def test_fill_classes_no_match_no_space(tmp_path):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1],
"class": [
"test class 1",
],
"student": ["test 1"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
fill_classes = [
{
"block": 1,
"class_name": "test class 1",
"total_students": 1,
"max_students": 0,
"num_classes": 1,
"classes": [set()],
},
]
student_classes_grouped = {
"test 1": {"blocks": {1: "test class 1", 2: "test class 2"}},
}
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
fill_classes = schedule_builder._fill_classes(fill_classes, student_classes_grouped)
assert not fill_classes
def test_fill_classes_match_move_day(tmp_path):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1, 2, 1, 2, 1, 2],
"class": [
"test class 1",
"test class 2",
"test class 1",
"test class 2",
"test class 1",
"test class 2",
],
"student": ["test 1", "test 1", "test 2", "test 2", "test 3", "test 3"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
fill_classes = [
{
"block": 1,
"class_name": "test class 1",
"total_students": 3,
"max_students": 2,
"num_classes": 2,
"classes": [set(), set()],
},
{
"block": 2,
"class_name": "test class 2",
"total_students": 3,
"max_students": 2,
"num_classes": 2,
"classes": [set(), set()],
},
]
student_classes_grouped = {
"test 1": {"blocks": {1: "test class 1", 2: "test class 2"}},
"test 2": {"blocks": {1: "test class 1", 2: "test class 2"}},
"test 3": {"blocks": {1: "test class 1", 2: "test class 2"}},
}
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_file)
fill_classes = schedule_builder._fill_classes(fill_classes, student_classes_grouped)
class_size = [sorted([len(y) for y in x["classes"]]) for x in fill_classes]
expected = [[1, 2], [1, 2]]
assert expected == class_size
def test_find_matches(student_matches_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
matches = schedule_builder._find_matches()
assert matches == student_matches_check
def test_find_matches_unused_order_found(tmp_path, caplog):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 1", "test 2", "test 1", "test 2"],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
class TestingScheduleBuilder(ScheduleBuilder):
def __init__(self, schedule_file_path):
self.final_schedule_df = None
self._schedule_df = self._load_data(schedule_file_path)
self._attempted_df = [df]
self._attempt = 1
self._verbose = True
logging.basicConfig(format="%(asctime)s: %(levelname)s: %(message)s")
logging.root.setLevel(level=logging.INFO)
self._logger = logging.getLogger()
schedule_builder = TestingScheduleBuilder(test_file)
schedule_builder._find_matches()
assert "Unused student order found" in caplog.text
def test_find_matches_unused_order_not_found(tmp_path, caplog):
test_file = str(tmp_path.joinpath("data1.xlsx"))
data_1 = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 1", "test 2", "test 1", "test 2"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
data_2 = {
"block": [1, 1, 2, 2],
"class": [
"test class 1",
"test class 1",
"test class 2",
"test class 2",
],
"student": ["test 2", "test 1", "test 2", "test 1"],
}
df_2 = pd.DataFrame(data_2)
class TestingScheduleBuilder(ScheduleBuilder):
def __init__(self, schedule_file_path):
self.final_schedule_df = None
self._schedule_df = self._load_data(schedule_file_path)
self._attempted_df = [df_1, df_2]
self._attempt = 1
self._verbose = True
logging.basicConfig(format="%(asctime)s: %(levelname)s: %(message)s")
logging.root.setLevel(level=logging.INFO)
self._logger = logging.getLogger()
schedule_builder = TestingScheduleBuilder(test_file)
schedule_builder._find_matches()
assert "No unused matches found" in caplog.text
def test_find_matches_retry(student_matches_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
schedule_builder._find_matches()
matches = schedule_builder._find_matches()
m_keys = [x.keys() for x in matches]
s_keys = [x.keys() for x in student_matches_check]
m_vals = [[[sorted(z) for z in y] for y in (list(x.values()))] for x in matches]
s_vals = [[[sorted(z) for z in y] for y in (list(x.values()))] for x in student_matches_check]
assert m_keys == s_keys
assert m_vals == s_vals
def test_get_class_size(class_size_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
class_size = schedule_builder._get_class_size()
assert class_size == class_size_check
def test_get_student_classes(student_classes_check, test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
student_classes = schedule_builder._get_student_classes()
assert student_classes == student_classes_check
@pytest.mark.parametrize("reduce_by", [0.1, 0.2, 0.5])
@pytest.mark.parametrize("smallest_allowed", [1, 5, 10])
def test_get_total_classes(class_size_check, reduce_by, smallest_allowed, test_schedule):
reduced_classes = reduce_classes_check(reduce_by, smallest_allowed, class_size_check)
check_total_classes = total_classes_check(reduced_classes)
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
total_classes = schedule_builder._get_total_classes(reduced_classes)
assert total_classes == check_total_classes
@pytest.mark.parametrize("reduce_by", [0.1, 0.2, 0.5])
@pytest.mark.parametrize("smallest_allowed", [1, 5, 10])
def test_init_classes(class_size_check, reduce_by, smallest_allowed, test_schedule):
expected = init_classes_check(class_size_check, reduce_by, smallest_allowed)
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
classes = schedule_builder._init_classes(reduce_by, smallest_allowed)
assert classes == expected
def test_init_schedule_builder(test_schedule):
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
test = pd.read_excel(str(test_schedule), engine="openpyxl")
assert test.equals(schedule_builder._schedule_df)
@pytest.mark.parametrize("reduce_by", [0.1, 0.2, 0.5])
@pytest.mark.parametrize("smallest_allowed", [1, 5, 10])
def test_reduce_class(class_size_check, reduce_by, smallest_allowed, test_schedule):
check_reduced = reduce_classes_check(reduce_by, smallest_allowed, class_size_check)
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
reduced_class = schedule_builder._reduce_class(class_size_check, reduce_by, smallest_allowed)
assert reduced_class == check_reduced
def test_save_schedule_to_file(tmp_path, test_schedule):
export_path = tmp_path.joinpath("schedule.xlsx")
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
schedule_builder.save_schedule(export_path)
assert export_path.exists()
def test_save_schedule_check_columns(tmp_path, test_schedule):
export_path = tmp_path.joinpath("schedule.xlsx")
schedule_builder = ScheduleBuilder()
schedule_builder.build_schedule_from_file(test_schedule)
schedule_builder.save_schedule(export_path)
df_saved = pd.read_excel(export_path, engine="openpyxl")
columns = df_saved.columns.values.tolist()
assert columns == [
"block",
"class",
"total_students",
"max_students",
"num_classes",
"day_number",
"student",
]
def test_validate_class_size_pass(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data = {
"block": [
1,
1,
],
"class": [
"test class 1",
"test class 1",
],
"total_students": [
2,
2,
],
"max_students": [
2,
2,
],
"num_classes": [
1,
1,
],
"day_number": [
1,
1,
],
"student": [
"test 1",
"test 2",
],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
schedule_builder = ScheduleBuilder()
validate_df = schedule_builder._validate_class_size(df)
assert not validate_df
def test_validate_class_size_fail(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data = {
"block": [
1,
1,
],
"class": [
"test class 1",
"test class 1",
],
"total_students": [
2,
2,
],
"max_students": [
1,
1,
],
"num_classes": [
1,
1,
],
"day_number": [
1,
1,
],
"student": [
"test 1",
"test 2",
],
}
df = pd.DataFrame(data)
df.to_excel(test_file, index=False, engine="openpyxl")
schedule_builder = ScheduleBuilder()
validate_df = schedule_builder._validate_class_size(df)
expected_df = pd.DataFrame(
{
"block": [1],
"class": ["test class 1"],
"max_students": [1],
"day_number": [1],
"class_size": [2],
}
)
assert expected_df.equals(validate_df)
def test_validate_classes_pass(tmp_path):
test_file = str(tmp_path.joinpath("test.xlsx"))
data_1 = {
"block": [1, 2, 3, 1, 2],
"class": ["test class 1", "test class 2", "test class 3", "test class 1", "test class 2"],
"student": ["test 1", "test 1", "test 1", "test 2", "test 2"],
}
df_1 = pd.DataFrame(data_1)
df_1.to_excel(test_file, index=False, engine="openpyxl")
    data_2 = {
"block": [1, 3, 2, 1, 2],
"class": [
"test class 1",
"test class 3",
"test class 2",
"test class 1",
"test class 2",
],
"student": ["test 1", "test 1", "test 1", "test 2", "test 2"],
}
    df_2 = pd.DataFrame(data_2)
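# Illustrative sketch (not part of the original test suite): the end-to-end
# ScheduleBuilder workflow that these tests exercise; both paths are placeholders.
def example_build_and_save(schedule_xlsx, output_xlsx):
    builder = ScheduleBuilder()
    builder.build_schedule_from_file(schedule_xlsx)  # load the input schedule spreadsheet
    builder.save_schedule(output_xlsx)  # write the generated schedule to disk
    return builder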
# -*- coding: utf-8 -*-
"""
This script performs statistical analysis on instabilities and outputs figures
and an HTML file with the statistics.
"""
import os
import sys
import itertools
import glob
import numpy as np
import pandas as pd
import math
from scipy import stats
import pingouin as pg
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
print("Performing statistical analysis")
# =============================================================================
# Setup
# =============================================================================
# Run matplotlib inline
get_ipython().run_line_magic('matplotlib', 'inline')
# Define filepaths
HOMEDIR = os.path.abspath(os.path.join(__file__, "../..")) + "/"
WORKDIR = HOMEDIR + "data/instabilities/"
OUTDIR = HOMEDIR + "data/stats/"
# Settings
SAVE = 1 # Save outputs or not
MAX_TIME = 23 # Maximum number of snapshot pairs
TAU_CUTOFF = 20 # Maximum tau value that will be displayed
TAU_PICK = 1 # Tau to visualize on barplots
# Runs to include
subjects = [f"sub{int(subid):0>3}" for subid in sys.argv[1:]] # Subject IDs
# from bash script
task = "rest"
boluses = ["BHB", "GLC"]
PrePost = ["pre", "post"]
taus = np.arange(1, TAU_CUTOFF+1)
times = np.arange(1, MAX_TIME+1)
N = len(subjects)
# HTML formatting settings
#- Color
cm = sns.cubehelix_palette(as_cmap=True, dark=0.5)
#- Set CSS properties for th elements in dataframe (table header)
th_props = [
('font-size', '16px'),
('text-align', 'center'),
('font-weight', 'bold'),
('color', '#6d6d6d'),
('background-color', '#f7f7f9'),
  ('border-width', '12px')
]
#- Set CSS properties for td elements in dataframe (table data)
td_props = [
('font-size', '14px'),
('color', '#000000'),
]
#- Set table styles
styles = [
dict(selector="th", props=th_props),
dict(selector="td", props=td_props)
]
# Matplotlib settings
FORMAT = ".pdf" # File format to save in
plt.rcParams["font.family"] = "DejaVu Sans"
plt.rcParams["font.weight"] = "bold"
plt.rcParams['text.color'] = "black"
plt.rcParams['axes.labelcolor'] = "black"
plt.rcParams['xtick.color'] = "black"
plt.rcParams['ytick.color'] = "black"
# P value formatting
LIM1 = 0.05  # P value threshold 1
LIM2 = 0.01  # P value threshold 2
LIM3 = 0.001  # P value threshold 3
# This function is used for rounding p values for the plots
def round_p_value(pval):
if pval >= 0.01:
return round(pval, 2)
else:
return round(pval, 3)
# Initiate method for slicing multiindex pd dataframes
idx = pd.IndexSlice
# =========================================================================
# Load and refine instabilities
# =========================================================================
# Load computed instabilities
data_instabs = pd.read_csv(sorted(glob.glob(os.path.join(
WORKDIR, "instabilities_*")))[-1],
header=0, index_col=0)
# Initiate big dataframe
data_long = []
# General labels for big dataframe
gen_labs = list(itertools.product(boluses, PrePost, taus, times))
# Loop through all
for s, sub in enumerate(subjects):
    # Cast subject and task identifiers to lists
subject_IV = [int(sub[-3:])]*len(gen_labs)
task_IV = [task]*len(gen_labs)
# Extract instabilities belonging to a specific (sub)network
new_item = data_instabs \
.query(f'(subject == "{sub}") & (tau <= {TAU_CUTOFF})') \
.loc[:, "whole"] \
.to_frame()
# Construct indexes for dataframe
indexes = pd.MultiIndex.from_tuples(list(zip(subject_IV,
task_IV, *(zip(*gen_labs)))),
names=["subject", "task", "bolus",
"PrePost", "tau", "time"])
# Add indexes to dataframe
new_item = new_item.set_index(indexes)
# Add current instabilities to the big dataframe
data_long.append(new_item)
# Finalize big dataframe
data_long = pd.concat(data_long, axis=0, ignore_index=False)
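# Illustrative only (added sketch, not part of the original analysis): the MultiIndex
# built above has levels (subject, task, bolus, PrePost, tau, time), so pd.IndexSlice
# can pull out, e.g., all pre-bolus BHB instabilities at tau == TAU_PICK; sort_index()
# avoids an UnsortedIndexError when label-slicing the freshly concatenated index.
example_bhb_pre = data_long.sort_index().loc[idx[:, :, "BHB", "pre", TAU_PICK, :], "whole"]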
import time
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from tensorflow.keras import backend as K
import models as models
import helper as helper
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, TensorBoard
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.feature_selection import RFE
from tensorflow.keras.layers import Dense, Dropout, LSTM, Embedding, Input, concatenate, Lambda, BatchNormalization
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorboard.plugins.hparams import api as hp
from tqdm import tqdm
import time
LOGDIR = f"logs/"
HP_NUM_UNITS = hp.HParam('num_units', hp.Discrete([64, 32]))
HP_DROPOUT = hp.HParam('dropout', hp.RealInterval(0.1, 0.2))
HP_L2 = hp.HParam('l2', hp.RealInterval(0.001, 0.01))
HP_OPTIMIZER = hp.HParam('optimizer', hp.Discrete(['adam', 'sgd']))
HP_LOSS = hp.HParam('loss', hp.Discrete(['categorical_crossentropy']))
METRIC_ACCURACY = 'accuracy'
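# Illustrative sketch (not from the original script): one common way to sweep the
# HParams declared above and log each run for TensorBoard's HParams dashboard.
# `train_test_model` is a hypothetical callable that builds/trains a model for the
# given hyperparameter dict and returns its accuracy.
def run_hparam_sweep(train_test_model):
    session = 0
    for num_units in HP_NUM_UNITS.domain.values:
        for dropout in (HP_DROPOUT.domain.min_value, HP_DROPOUT.domain.max_value):
            for l2 in (HP_L2.domain.min_value, HP_L2.domain.max_value):
                for optimizer in HP_OPTIMIZER.domain.values:
                    for loss in HP_LOSS.domain.values:
                        hparams = {HP_NUM_UNITS: num_units, HP_DROPOUT: dropout,
                                   HP_L2: l2, HP_OPTIMIZER: optimizer, HP_LOSS: loss}
                        run_dir = LOGDIR + f"run-{session}"
                        with tf.summary.create_file_writer(run_dir).as_default():
                            hp.hparams(hparams)  # record this run's hyperparameter values
                            accuracy = train_test_model(hparams)
                            tf.summary.scalar(METRIC_ACCURACY, accuracy, step=1)
                        session += 1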
### Gather and prepare data
dfs = []
for i in range(2010, 2021):
dfs.append(pd.read_csv(f'./stats/seasons/{str(i) + str(i+1)}/{str(i) + str(i+1)}_done.csv', sep=';'))
# Append all df's in dfs to df
df = pd.concat(dfs)
import pandas as pd
import numpy as np
import datetime
import calendar
from math import e
from brightwind.analyse import plot as plt
# noinspection PyProtectedMember
from brightwind.analyse.analyse import dist_by_dir_sector, dist_12x24, coverage, _convert_df_to_series
from ipywidgets import FloatProgress
from IPython.display import display
from IPython.display import clear_output
import re
import warnings
pd.options.mode.chained_assignment = None
__all__ = ['Shear']
class Shear:
class TimeSeries:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', max_plot_height=None,
maximise_data=False):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for each timestamp
of a wind series.
:param wspds: pandas DataFrame, list of pandas.Series or list of wind speeds to be used for calculating shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights.
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3.
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param max_plot_height: height to which the wind profile plot is extended.
:type max_plot_height: float
:param maximise_data: If maximise_data is True, calculations will be carried out on all data where two or
more anemometers readings exist for a timestamp. If False, calculations will only be
carried out on timestamps where readings exist for all anemometers.
:type maximise_data: Boolean
:return TimeSeries object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype TimeSeries object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights, maximise_data=True)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law',
max_plot_height=120)
# Get the alpha or roughness values calculated
timeseries_power_law.alpha
timeseries_log_law.roughness
# View plot
timeseries_power_law.plot
timeseries_log_law.plot
# View input anemometer data
timeseries_power_law.wspds
timeseries_log_law.wspds
# View other information
pprint.pprint(timeseries_power_law.info)
pprint.pprint(timeseries_log_law.info)
"""
print('This may take a while...')
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed, maximise_data=maximise_data)
if calc_method == 'power_law':
alpha_c = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_power_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
alpha = pd.Series(alpha_c.iloc[:, 0], name='alpha')
self._alpha = alpha
elif calc_method == 'log_law':
slope_intercept = (wspds[(wspds > min_speed).all(axis=1)].apply(Shear._calc_log_law, heights=heights,
return_coeff=True,
maximise_data=maximise_data, axis=1))
slope = slope_intercept.iloc[:, 0]
intercept = slope_intercept.iloc[:, 1]
roughness_coefficient = pd.Series(Shear._calc_roughness(slope=slope, intercept=intercept),
name='roughness_coefficient')
self._roughness = roughness_coefficient
clear_output()
avg_plot = Shear.Average(wspds=wspds, heights=heights, calc_method=calc_method,
max_plot_height=max_plot_height)
self.origin = 'TimeSeries'
self.calc_method = calc_method
self.wspds = wspds
self.plot = avg_plot.plot
self.info = Shear._create_info(self, heights=heights, cvg=cvg, min_speed=min_speed)
@property
def alpha(self):
return self._alpha
@property
def roughness(self):
return self._roughness
def apply(self, wspds, height, shear_to):
""""
Applies shear calculated to a wind speed time series and scales wind speed from one height to
another for each matching timestamp.
:param self: TimeSeries object to use when applying shear to the data.
:type self: TimeSeries object
:param wspds: Wind speed time series to apply shear to.
:type wspds: pandas.Series
:param height: height of above wspds.
:type height: float
:param shear_to: height to which wspds should be scaled to.
:type shear_to: float
:return: a pandas.Series of the scaled wind speeds.
:rtype: pandas.Series
**Example Usage**
::
import brightwind as bw
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Get power law object
timeseries_power_law = bw.Shear.TimeSeries(anemometers, heights)
timeseries_log_law = bw.Shear.TimeSeries(anemometers, heights, calc_method='log_law')
# Scale wind speeds using calculated exponents
timeseries_power_law.apply(data['Spd40mN'], height=40, shear_to=70)
timeseries_log_law.apply(data['Spd40mN'], height=40, shear_to=70)
"""
return Shear._apply(self, wspds, height, shear_to)
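        # Added commentary (not in the original source): the two calc_method options
        # correspond to the standard shear models. Power law: v2 = v1 * (z2 / z1) ** alpha,
        # so alpha follows from a fit of log(speed) against log(height). Log law:
        # v(z) = (u_star / kappa) * ln(z / z0), so the roughness length z0 is recovered
        # from the slope and intercept of a fit of speed against ln(height).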
class TimeOfDay:
def __init__(self, wspds, heights, min_speed=3, calc_method='power_law', by_month=True, segment_start_time=7,
segments_per_day=24, plot_type='line'):
"""
Calculates alpha, using the power law, or the roughness coefficient, using the log law, for a wind series
binned by time of the day and (optionally by) month, depending on the user's inputs. The alpha/roughness
coefficient values are calculated based on the average wind speeds at each measurement height in each bin.
:param wspds: pandas.DataFrame, list of pandas.Series or list of wind speeds to be used for calculating
shear.
:type wspds: pandas.DataFrame, list of pandas.Series or list.
:param heights: List of anemometer heights..
:type heights: list
:param min_speed: Only speeds higher than this would be considered for calculating shear, default is 3
:type min_speed: float
:param calc_method: method to use for calculation, either 'power_law' (returns alpha) or 'log_law'
(returns the roughness coefficient).
:type calc_method: str
:param by_month: If True, calculate alpha or roughness coefficient values for each daily segment and month.
If False, average alpha or roughness coefficient values are calculated for each daily
segment across all months.
:type by_month: Boolean
:param segment_start_time: Starting time for first segment.
:type segment_start_time: int
:param segments_per_day: Number of segments into which each 24 period is split. Must be a divisor of 24.
:type segments_per_day: int
:param plot_type: Type of plot to be generated. Options include 'line', 'step' and '12x24'.
:type plot_type: str
:return: TimeOfDay object containing calculated alpha/roughness coefficient values, a plot
and other data.
:rtype: TimeOfDay object
**Example usage**
::
import brightwind as bw
import pprint
# Load anemometer data to calculate exponents
data = bw.load_csv(C:\\Users\\Stephen\\Documents\\Analysis\\demo_data)
anemometers = data[['Spd80mS', 'Spd60mS','Spd40mS']]
heights = [80, 60, 40]
# Using with a DataFrame of wind speeds
timeofday_power_law = bw.Shear.TimeOfDay(anemometers, heights, daily_segments=2, segment_start_time=7)
timeofday_log_law = bw.Shear.TimeOfDay(anemometers, heights, calc_method='log_law', by_month=False)
# Get alpha or roughness values calculated
timeofday_power_law.alpha
timeofday_log_law.roughness
# View plot
timeofday_power_law.plot
timeofday_log_law.plot
# View input data
timeofday_power_law.wspds
timeofday_log_law.wspds
# View other information
pprint.pprint(timeofday_power_law.info)
pprint.pprint(timeofday_log_law.info)
"""
wspds, cvg = Shear._data_prep(wspds=wspds, heights=heights, min_speed=min_speed)
# initialise empty series for later use
start_times = pd.Series([])
time_wspds = pd.Series([])
mean_time_wspds = pd.Series([])
c = pd.Series([])
slope = pd.Series([])
intercept = pd.Series([])
alpha = pd.Series([])
roughness = pd.Series([])
slope_df = pd.DataFrame([])
            intercept_df = pd.DataFrame([])
#!/usr/bin/env python
import pandas as pd
import numpy as np
import scipy, sklearn, os, sys, string, fileinput, glob, re, math, itertools, functools
import copy, multiprocessing, traceback, logging, pickle
import scipy.stats, sklearn.decomposition, sklearn.preprocessing, sklearn.covariance
from scipy.stats import describe
from scipy import sparse
import os.path
import scipy.sparse
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.preprocessing import normalize
from collections import defaultdict
from tqdm import tqdm
def read_raw_files_and_write_h5_files(outdir):
import utils
for pid in ['180430_1','180430_5','180430_6']:
adata1 = utils.SlideSeq.loadRawData("/afs/csail.mit.edu/u/r/rsingh/work/afid/data/slideseq/raw/", pid, 100)
adata1.write("/afs/csail.mit.edu/u/r/rsingh/work/afid/data/slideseq/processed/puck_{0}.h5ad".format(pid))
def computeKernelDensityGranuleCells(adata1, kd_fit_granule_only=True, kd_bw=125):
from sklearn.neighbors import KernelDensity
fscl = lambda v: 20*(sklearn.preprocessing.MinMaxScaler().fit_transform(np.exp(v[:,None]-v.min()))).ravel()
d3 = adata1.obs.copy(deep=True) #adata1.uns["Ho"].merge(adata1.obs, how="inner", left_index=True, right_index=True)
d3c = d3[["xcoord","ycoord"]]
if kd_fit_granule_only:
d3["kd"] = fscl(KernelDensity(kernel='gaussian', bandwidth=kd_bw).fit(d3c[d3["atlas_cluster"]==1].values).score_samples(d3c.values))
else:
d3["kd"] = fscl(KernelDensity(kernel='gaussian', bandwidth=kd_bw).fit(d3c.values).score_samples(d3c.values))
adata1.obs["kd"] = d3["kd"]
return adata1
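# Illustrative usage sketch (not part of the original pipeline): inspecting the
# kernel-density score added by computeKernelDensityGranuleCells for one puck.
# Assumes `adata` is an AnnData object with "xcoord", "ycoord" and "atlas_cluster"
# columns in .obs, as produced by read_raw_files_and_write_h5_files above.
def example_plot_kd(adata, kd_bw=125):
    import matplotlib.pyplot as plt
    adata = computeKernelDensityGranuleCells(adata, kd_fit_granule_only=True, kd_bw=kd_bw)
    fig, ax = plt.subplots(figsize=(4, 4), dpi=150)
    sc = ax.scatter(adata.obs["xcoord"], adata.obs["ycoord"], c=adata.obs["kd"],
                    cmap="seismic", s=1)
    fig.colorbar(sc, ax=ax, label="granule-cell kernel density (scaled)")
    ax.set_aspect("equal")
    return fig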
def checkMaxFeasibleCorr(D, d0, g, tg, wg):
try:
sys.path.append(os.path.join(sys.path[0],'../../schema'))
import schema_qp
except:
from schema import schema_qp
for thresh in [0.30, 0.275, 0.25, 0.225, 0.20, 0.15, 0.10, 0.075, 0.06, 0.05, 0.04, 0.03, 0.025, 0.02, 0.015, 0.01]:
print ("STARTING TRY OF ", thresh)
try:
sqp = schema_qp.SchemaQP(thresh, w_max_to_avg=1000, params= {"dist_npairs": 1000000}, mode="scale")
dz1 = sqp.fit(D, g, tg, wg, d0=d0)
print ("SUCCEEDED TRY OF ", thresh)
return 0.9*thresh, thresh
except:
print ("EXCEPTION WHEN TRYING ", thresh)
#raise
return 0,0
def runSchemaGranuleCellDensity(D, d0, gIn, tgIn, wgIn, min_corr1, min_corr2):
try:
sys.path.append(os.path.join(sys.path[0],'../../schema'))
import schema_qp
except:
from schema import schema_qp
f_linear = lambda v:v
ret_val = {}
w_list= [1,10,50,100]
for w in w_list:
s="linear"
f=f_linear
g1, wg1, tg1 = gIn[:], wgIn[:], tgIn[:] # does maximize negative corr with non-granule
wg1[0] = w
g = [g1[0]]; wg = [wg1[0]]; tg=[tg1[0]] # does NOT maximize negative corr with non-granule
#afx0 = schema_qp.SchemaQP(0.001, 1000, mode="scale")
#Dx0 = afx0.fit_transform(D,g,tg,wg,d0)
#ret_val[(s,w,0)] = (np.sqrt(afx0._wts), afx0._soln_info)
try:
afx1 = schema_qp.SchemaQP(min_corr1, w_max_to_avg=1000, mode="scale")
Dx1 = afx1.fit_transform(D,g,tg,wg,d0=d0)
ret_val[(s,w,1)] = (np.sqrt(afx1._wts), afx1._soln_info ) # does NOT maximize negative corr with non-granule
except:
print("TRYING min-corr {0} for afx1 broke here".format(min_corr1))
continue
try:
afx2 = schema_qp.SchemaQP(min_corr1, w_max_to_avg=1000, mode="scale")
Dx2 = afx2.fit_transform(D,g1,tg1,wg1,d0=d0)
ret_val[(s,w,2)] = (np.sqrt(afx2._wts), afx2._soln_info) # does maximize negative corr with non-granule
except:
print("TRYING min-corr {0} for afx2 broke here".format(min_corr1))
continue
try:
afx3 = schema_qp.SchemaQP(min_corr2, w_max_to_avg=1000, mode="scale")
Dx3 = afx3.fit_transform(D,g1,tg1,wg1,d0=d0) # does maximize negative corr with non-granule
ret_val[(s,w,3)] = (np.sqrt(afx3._wts), afx3._soln_info)
except:
print("TRYING min-corr {0} for afx3 broke here".format(min_corr2))
continue
return ret_val
def getSoftmaxCombinedScores(Wo, schema_ret, use_generanks=True, do_pow2=True, schema_allowed_w1s=None):
if use_generanks:
R = Wo.rank(axis=1, pct=True).values.T
else:
R = Wo.values.T
sumr = None; nr=0
for x in schema_ret:
style,w,i = x
        if style!="linear" or i not in [2,3]: continue  # 2,3 correspond to schema runs that also require disagreement with other atlas clusters
if schema_allowed_w1s is not None and w not in schema_allowed_w1s: continue
wx = schema_ret[x][0]**2
#wx = wx/np.sum(wx)
schema_wts = wx**(2 if do_pow2 else 1)
if np.max(schema_wts) > 20:
schema_wts = 20*schema_wts/np.max(schema_wts)
schema_probs = np.exp(schema_wts)/np.sum(np.exp(schema_wts))
g1 = (R*schema_probs).sum(axis=1)
g2 = g1/np.std(g1.ravel())
#g2 = scipy.stats.rankdata(g1); g2 = g2/np.max(g2)
if sumr is None:
sumr = g2
else:
sumr += g2
nr += 1
rnks = sumr/nr
s1= pd.Series(rnks, index=list(Wo.columns)).sort_values(ascending=False)
return {u:(i+1) for i,u in enumerate(list(s1.index))}
def getCellLoadingSoftmax(d3, Wo, schema_ret):
R = Wo.values.T
sumr = None; nr=0
for x in schema_ret:
style,w,i = x
if style!="linear" or i not in [2,3]: continue
wx = schema_ret[x][0]**2
#wx = wx/np.sum(wx)
schema_wts = wx
if np.max(schema_wts) > 20:
schema_wts = 20*schema_wts/np.max(schema_wts)
schema_probs = np.exp(schema_wts)/np.sum(np.exp(schema_wts))
if sumr is None:
sumr = schema_probs
else:
sumr += schema_probs
nr += 1
r = sumr/nr
v = (d3.iloc[:,:100].multiply(r,axis=1)).sum(axis=1)
return v.values
def generatePlotGranuleCellDensity(d3, cell_loadings):
import matplotlib.pyplot as plt
import seaborn as sns
score1 = cell_loadings
np.random.seed(239)
plt.style.use('seaborn-paper')
plt.rcParams['lines.markersize'] = np.sqrt(0.25)
fc = lambda v: np.where(v,'lightslategray','red')
#fig = plt.figure(constrained_layout=True, figsize=(6.48,2.16), dpi=300) #(2*6.48,2*2.16))
fig = plt.figure(figsize=(6.48,2.16), dpi=300) #(2*6.48,2*2.16))
gs = fig.add_gridspec(2,6,wspace=0,hspace=0) #(2, 2)
idxY=d3["atlas_cluster"]==1
idxN=d3["atlas_cluster"]!=1
coords = d3.loc[:,["xcoord","ycoord"]].values
clstr = d3["atlas_cluster"].values
cid_list = [1,2,3,6]
fc = lambda v: np.where(v,'lightslategray','red')
axdict = {}
xyL = [(gs[0,0], coords[clstr==1,:], 'a','Granule Cells'), (gs[0,1], coords[clstr==2,:],'b','Purkinje Cells'),
(gs[1,0], coords[clstr==3,:], 'c','Interneuron'), (gs[1,1], coords[clstr==6,:], 'd','Oligodendrocytes')]
for g, dx, titlestr, desc in xyL:
ax = fig.add_subplot(g)
fc = lambda v: np.where(v,'lightslategray','red')
ax.text(.95, .05, titlestr, horizontalalignment='center', transform=ax.transAxes, size=14 )
ax.axis('off')
ax.scatter(dx[:,0], dx[:,1], color='black', alpha=0.20 if titlestr=='a' else 0.6 )
ax.set_aspect('equal')
axdict[titlestr] = ax
ax = fig.add_subplot(gs[:,2:4])
im = ax.scatter(coords[clstr==1,0],coords[clstr==1,1],c=2*d3["kd"].values[clstr==1],cmap="seismic",s=1)
#im = ax.scatter(coords[:,0],coords[:,1],c=2*d3["kd"].values,cmap="seismic",s=1)
ax.set_aspect('equal')
ax.axis('off')
from mpl_toolkits.axes_grid1 import make_axes_locatable
div = make_axes_locatable(ax)
cax = div.append_axes("bottom", size="3%", pad=0.01)
cbar = fig.colorbar(im, cax=cax, shrink=0.2, orientation='horizontal')
ax.text(.9, .05, "e", horizontalalignment='center', transform=ax.transAxes, size=14 )
sx = score1 > np.quantile(score1,0.75)
for g, titlestr, ii, c1 in [(gs[0,4], "f", idxY & sx, 'r'), (gs[0,5], "g", idxY & (~sx), 'b'),
(gs[1,4], "h", idxN & sx, 'r'), (gs[1,5], "i", idxN & (~sx), 'b')]:
ax = fig.add_subplot(g)
ax.text(.95, .05, titlestr, horizontalalignment='center', transform=ax.transAxes, size=14 )
#ax.axes.get_xaxis().set_visible(False)
#ax.axes.get_yaxis().set_visible(False)
ax.axis('off')
ax.scatter(coords[ii,0], coords[ii,1], color=c1, alpha=0.40 )
ax.set_aspect('equal')
axdict[titlestr] = ax
####################################
fig.tight_layout()
return fig
def processGranuleCellDensitySchema(adata1, extra_args):
if "kd" not in adata1.obs.columns:
adata1 = computeKernelDensityGranuleCells(adata1,
kd_fit_granule_only = int(extra_args.get("kd_fit_granule_only",1))==1,
kd_bw = float(extra_args.get("kd_bw",125)))
d3 = adata1.uns["Ho"].merge(adata1.obs, how="inner", left_index=True, right_index=True)
Wo = adata1.uns["Wo"]
cols_Ho = list(adata1.uns["Ho"].columns)
D = d3[cols_Ho].values
d0 = 1*(d3["atlas_cluster"].values==1)
g = [(d3["kd"].values)]; wg=[10]; tg=["numeric"]
for clid in [2,3,6,7]:
g.append(1*(d3["atlas_cluster"].values==clid))
wg.append(-1)
tg.append("categorical")
min_corr1, min_corr2 = checkMaxFeasibleCorr(D, d0, g, tg, wg)
schema_ret = runSchemaGranuleCellDensity(D, d0, g, tg, wg, min_corr1, min_corr2)
scores = getSoftmaxCombinedScores(Wo, schema_ret, use_generanks=False, do_pow2=False)
cell_loadings = getCellLoadingSoftmax(d3, Wo, schema_ret)
fig = generatePlotGranuleCellDensity(d3, cell_loadings)
return (fig, d3, schema_ret, min_corr1, min_corr2, scores, cell_loadings)
def doSchemaCCA_CellScorePlot2(d3, cca_x_scores, cell_loadings):
clstrs = d3["atlas_cluster"]
kd = d3["kd"]
cca_sgn = np.sign(scipy.stats.pearsonr(d3["kd"],cca_x_scores)[0]) #flip signs if needed
R = {}
for desc,v in [("ccax", cca_sgn*cca_x_scores), ("schema", cell_loadings)]:
vr = scipy.stats.rankdata(v)
vr = vr/vr.max()
l = []
for t in np.linspace(0,1,100)[:-1]:
cx = clstrs[vr >=t ]
granule_frac = (np.sum(cx==1)/(1e-12+ len(cx)))
cx2 = kd[ vr >= t]
kd_val = np.median(cx2)
l.append((granule_frac, kd_val))
R[desc]= list(zip(*l))
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
plt.rcParams['lines.markersize'] = np.sqrt(0.25)
fig = plt.figure(dpi=300) #(2*6.48,2*2.16))
a = np.linspace(0,1,100)
plt.scatter(R["ccax"][0], R["ccax"][1], s=(1+3*a)**2, c="red", figure=fig)
plt.scatter(R["schema"][0], R["schema"][1], s=(1+3*a)**2, c="blue", figure=fig)
fig.legend("CCA fit,Schema fit".split(","))
plt.xlabel("Fraction of Beads labeled as Granule Cells", figure=fig)
plt.ylabel("Median Kernel Density Score", figure=fig)
return fig
#################################################################################
if __name__ == "__main__":
try:
sys.path.append(os.path.join(sys.path[0],'../../schema'))
from utils import SlideSeq
except:
from schema.utils import SlideSeq
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--mode", help="which code path to run. see main(..) for details")
parser.add_argument("--outdir", help="output directory (can set to '.')", type=str, default=".")
parser.add_argument("--outpfx", help="prefix to use when producing output files")
parser.add_argument("--style", help="mode-specific interpretation", type=int, default=-1)
parser.add_argument("--infile", help="input .h5ad file. Default is SlideSeq 180430_1 h5ad")
parser.add_argument("--njobs", help="number of parallel cores to use", type=int, default=24)
parser.add_argument("--extra", help="put this as the LAST option and arbitrary space-separated key=val pairs after that", type=str, nargs='*')
args = parser.parse_args()
assert args.mode is not None
if args.mode !="raw_data_read": assert args.outpfx is not None
if args.infile is None:
args.infile = "/afs/csail.mit.edu/u/r/rsingh/work/schema/data/slideseq/processed/puck_180430_1.h5ad"
extra_args = dict([a.split("=") for a in args.extra]) if args.extra else {}
if args.mode== "raw_data_read":
read_raw_files_and_write_h5_files( args.outdir)
if args.mode == "schema_kd_granule_cells":
adata1 = SlideSeq.loadAnnData(args.infile)
try:
from schema import schema_qp
except:
sys.path.append(os.path.join(sys.path[0],'../../schema'))
import schema_qp
schema_qp.schema_loglevel = logging.WARNING
fig, d3, schema_ret, min_corr1, min_corr2, scores, cell_loadings = processGranuleCellDensitySchema(adata1, extra_args)
fig.tight_layout()
fig.savefig("{0}_fig-KD.png".format(args.outpfx), dpi=300)
fig.savefig("{0}_fig-KD.svg".format(args.outpfx))
pickle.dump((d3[["xcoord","ycoord","kd","atlas_cluster"]], schema_ret, min_corr1, min_corr2, scores, cell_loadings),
open("{0}_func_output.pkl".format(args.outpfx), "wb"))
if args.mode == "cca_kd_granule_cells":
adata1 = SlideSeq.loadAnnData(args.infile)
if "kd" not in adata1.obs.columns:
adata1 = computeKernelDensityGranuleCells(adata1,
kd_fit_granule_only = int(extra_args.get("kd_fit_granule_only",1))==1,
kd_bw = float(extra_args.get("kd_bw",125)))
from sklearn.cross_decomposition import CCA
cca = CCA(1)
cca.fit(adata1.X, adata1.obs["kd"])
cca_sgn = np.sign(scipy.stats.pearsonr(adata1.obs["kd"], cca.x_scores_[:,0])[0]) #flip signs if needed
fig = generatePlotGranuleCellDensity(adata1.obs, cca_sgn*cca.x_scores_[:,0])
fig.tight_layout()
fig.savefig("{0}_fig-CCA.png".format(args.outpfx), dpi=300)
fig.savefig("{0}_fig-CCA.svg".format(args.outpfx))
pickle.dump((adata1.obs[["xcoord","ycoord","kd","atlas_cluster"]], cca.x_scores_[:,0], cca.x_loadings_[:,0], cca.y_scores_[:,0]),
open("{0}_CCA_output.pkl".format(args.outpfx), "wb"))
if args.mode == "cca2step_kd_granule_cells":
adata1 = SlideSeq.loadAnnData(args.infile)
if "kd" not in adata1.obs.columns:
adata1 = computeKernelDensityGranuleCells(adata1,
kd_fit_granule_only = int(extra_args.get("kd_fit_granule_only",1))==1,
kd_bw = float(extra_args.get("kd_bw",125)))
#### adata1 = adata1[:,:40] ## FOR TESTING
from sklearn.cross_decomposition import CCA
cca1 = CCA(1)
cca1.fit(adata1.X, adata1.obs["kd"])
cca1_sgn = np.sign(scipy.stats.pearsonr(adata1.obs["kd"],cca1.x_scores_[:,0])[0]) #flip signs if needed
cca2 = CCA(1)
cca2.fit(adata1.X, 1*(adata1.obs["atlas_cluster"]==1))
cca2_sgn = np.sign(scipy.stats.pearsonr(1*(adata1.obs["atlas_cluster"]==1),cca2.x_scores_[:,0])[0]) #flip signs if needed
score1 = cca1_sgn*cca1.x_scores_[:,0]
score2 = cca2_sgn*cca2.x_scores_[:,0]
scorex = 0.5 * (score1/np.std(score1) + score2/np.std(score2))
scorex = scorex/np.sqrt(np.sum(scorex**2))
loadings = np.matmul(np.transpose(adata1.X), scorex)
intcpt = 0
print("Flag 2320.01 ", scorex.shape, adata1.X.shape, loadings.shape, describe(scorex), describe(loadings))
fig = generatePlotGranuleCellDensity(adata1.obs, scorex)
fig.tight_layout()
fig.savefig("{0}_fig-CCA2STEP.png".format(args.outpfx), dpi=300)
fig.savefig("{0}_fig-CCA2STEP.svg".format(args.outpfx))
pickle.dump((adata1.obs[["xcoord","ycoord","kd","atlas_cluster"]], scorex, loadings, intcpt),
open("{0}_CCA2STEP_output.pkl".format(args.outpfx), "wb"))
if args.mode == "cca_schema_comparison_plot":
cca_pkl_file = extra_args["cca_pkl_file"]
schema_pkl_file = extra_args["schema_pkl_file"]
cca_d3, cca_x_scores, _ , _ = pickle.load(open(cca_pkl_file,"rb"))
schema_d3, _, _, _, _, cell_loadings = pickle.load(open(schema_pkl_file,"rb"))
#fig = doSchemaCCA_CellScorePlot(cca_d3, cca_x_scores, cell_loadings)
fig = doSchemaCCA_CellScorePlot2(cca_d3, cca_x_scores, cell_loadings)
fig.savefig("{0}_fig-Schema-CCA-cmp.png".format(args.outpfx), dpi=300)
fig.savefig("{0}_fig-Schema-CCA-cmp.svg".format(args.outpfx))
if args.mode == "generate_multipuck_gene_ranks":
pkl_file_glob = extra_args["pkl_file_glob"]
assert extra_args["data_type"].lower() in ["schema","cca"]
data_type = extra_args["data_type"].lower()
pkl_flist = glob.glob(pkl_file_glob)
print("Flag 67.10 ", pkl_flist)
assert len(pkl_flist) > 0
L = []
for f in pkl_flist:
if data_type == "schema":
_, _, _, _, scores, _ = pickle.load(open(f,"rb"))
L.append([a[0] for a in sorted(scores.items(), key=lambda v:v[1])]) #in schema rankings, low number means top-rank
elif data_type == "cca":
d3, cca_x_scores, cca_x_loadings, _ = pickle.load(open(f,"rb"))
cca_sgn = np.sign(scipy.stats.pearsonr(d3["kd"],cca_x_scores)[0])
puckid = f[f.index("180430"):][:8]
adata1 = SlideSeq.loadAnnData("{0}/puck_{1}.h5ad".format(os.path.dirname(f), puckid))
df = pd.DataFrame.from_dict({"gene": list(adata1.uns["Wo"].columns), "cca_scores": cca_sgn*cca_x_loadings})
df = df.sort_values("cca_scores", ascending=False)
L.append(list(df.gene.values))
Nmax = max(len(a) for a in L)
print ("Flag 67.40 ", len(L), len(L[0]), Nmax)
cons_score = {}
active_set = set()
for i in range(1,Nmax+1):
currset = set.intersection(*[set(a[:i]) for a in L])
if len(currset) > len(active_set):
for s in currset-active_set:
cons_score[s] = len(currset)
active_set = currset
g = []; s = []
for k,v in cons_score.items():
g.append(k)
s.append(v)
        pd.DataFrame.from_dict({"gene": g, "rank": s})
#!/usr/bin/env python
"""
Parsing GO Accession from a table file produced by InterProScan and mapping to GOSlim.
(c) <NAME> 2018 / MIT Licence
kinomoto[AT]sakura[DOT]idv[DOT]tw
"""
from __future__ import print_function
from os import path
import sys
import pandas as pd
from goatools.obo_parser import GODag
from goatools.mapslim import mapslim
from joblib import Parallel, delayed
import optparse
p = optparse.OptionParser("%prog [options] <eggnog_diamond_file> <go_obo_file>")
p.add_option("-o", "--out", dest="output_filename", help="Directory to store " "the output file [default: GO_term_annotation.txt]", action="store", type="string", default="GO_term_annotation.txt")
p.add_option("-g", "--goslim", dest="goslim_obo_file", action="store",
help="The .obo file for the most current GO Slim terms "
"[default: Null]", type="string", default=None)
p.add_option("-O", "--goslim_out", dest="goslim_output_filename", action="store", help="Directory to store the output file [default: " "GOSlim_annotation.txt]", type="string", default="GOSlim_annotation.txt")
p.add_option("-t", "--goslim_type", dest="goslim_type", action="store", type="string", default="direct", help="One of `direct` or `all`. Defines "
"whether the output should contain all GOSlim terms (all "
"ancestors) or only direct GOSlim terms (only direct "
"ancestors) [default: direct]")
p.add_option("-s", "--sort", dest="is_sort", action="store_true", default=False, help="Sort the output table [default: False]")
opts, args = p.parse_args()
# check for correct number of arguments
if len(args) != 2:
p.print_help()
sys.exit(1)
interpro_file = args[0]
assert path.exists(interpro_file), "file %s not found!" % interpro_file
obo_file = args[1]
assert path.exists(obo_file), "file %s not found!" % obo_file
# check that --goslim is set
USE_SLIM = False
if (opts.goslim_obo_file is not None):
assert path.exists(opts.goslim_obo_file), "file %s not found!" % opts.goslim_obo_file
USE_SLIM = True
# check that slim_out is either "direct" or "all" and set according flag
if opts.goslim_type.lower() == "direct":
ONLY_DIRECT = True
elif opts.goslim_type.lower() == "all":
ONLY_DIRECT = False
else:
p.print_help()
sys.exit(1)
# load InterProScan_tsv_file
interpro_table = pd.read_csv(interpro_file, sep='\t', skiprows=3, skipfooter=3, engine='python')
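# Illustrative sketch (not part of the original script): how GO accessions parsed from
# the table are typically mapped onto GOSlim terms with goatools. `go_ids` is a
# hypothetical, already-parsed list of "GO:XXXXXXX" strings, and `go_dag` / `goslim_dag`
# are GODag objects built from `obo_file` and `opts.goslim_obo_file`.
def map_ids_to_goslim(go_ids, go_dag, goslim_dag, only_direct=ONLY_DIRECT):
    slim_terms = set()
    for go_id in go_ids:
        if go_id not in go_dag:  # skip obsolete or unknown accessions
            continue
        direct_anc, all_anc = mapslim(go_id, go_dag, goslim_dag)
        slim_terms |= direct_anc if only_direct else all_anc
    return slim_terms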
# PyLS-PM Library
# Author: <NAME>
# Creation: November 2016
# Description: Library based on <NAME>'s simplePLS,
# <NAME>'s plspm and <NAME>'s matrixpls made in R
import pandas as pd
import numpy as np
import scipy as sp
import scipy.stats
from .qpLRlib4 import otimiza, plotaIC
import scipy.linalg
from collections import Counter
from .pca import *
from pandas.plotting import scatter_matrix
from .adequacy import *
import matplotlib.pyplot as plt
class PyLSpm(object):
def PCA(self):
for i in range(self.lenlatent):
print(self.latent[i])
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
PCAdo(block, self.latent[i])
print('KMO')
print(KMO(block))
print('BTS')
print(BTS(block))
def scatterMatrix(self):
for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
scatter_matrix(block, diagonal='kde')
plt.savefig('imgs/scatter' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
def sampleSize(self):
r = 0.3
alpha = 0.05
# power=0.9
C = 0.5 * np.log((1 + r) / (1 - r))
Za = scipy.stats.norm.ppf(1 - (0.05 / 2))
sizeArray = []
powerArray = []
power = 0.5
for i in range(50, 100, 1):
power = i / 100
powerArray.append(power)
Zb = scipy.stats.norm.ppf(1 - power)
N = abs((Za - Zb) / C)**2 + 3
sizeArray.append(N)
return [powerArray, sizeArray]
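        # Added commentary (not in the original source): the loop above is the standard
        # Fisher z-transformation approach. With C = 0.5 * ln((1 + r) / (1 - r)), the
        # sample size needed to detect a correlation r at significance alpha and a given
        # power is approximately N = ((Z_{alpha/2} + Z_{power}) / C) ** 2 + 3, which is
        # what abs((Za - Zb) / C) ** 2 + 3 computes since Zb = -Z_{power}.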
def normaliza(self, X):
correction = np.sqrt((len(X) - 1) / len(X)) # std factor corretion
mean_ = np.mean(X, 0)
scale_ = np.std(X, 0)
X = X - mean_
X = X / (scale_ * correction)
return X
def gof(self):
r2mean = np.mean(self.r2.T[self.endoexo()[0]].values)
AVEmean = self.AVE().copy()
totalblock = 0
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = len(block.columns.values)
totalblock += block
AVEmean[self.latent[i]] = AVEmean[self.latent[i]] * block
AVEmean = np.sum(AVEmean) / totalblock
return np.sqrt(AVEmean * r2mean)
def endoexo(self):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
return endoVar, exoVar
def residuals(self):
exoVar = []
endoVar = []
outer_residuals = self.data.copy()
# comun_ = self.data.copy()
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = block.columns.values
loadings = self.outer_loadings.ix[
block][self.latent[i]].values
outer_ = self.fscores.ix[:, i].values
outer_ = outer_.reshape(len(outer_), 1)
loadings = loadings.reshape(len(loadings), 1)
outer_ = np.dot(outer_, loadings.T)
outer_residuals.ix[:, block] = self.data_.ix[
:, block] - outer_
# comun_.ix[:, block] = outer_
inner_residuals = self.fscores[endoVar]
inner_ = pd.DataFrame.dot(self.fscores, self.path_matrix.ix[endoVar].T)
inner_residuals = self.fscores[endoVar] - inner_
residuals = pd.concat([outer_residuals, inner_residuals], axis=1)
mean_ = np.mean(self.data, 0)
# comun_ = comun_.apply(lambda row: row + mean_, axis=1)
sumOuterResid = pd.DataFrame.sum(
pd.DataFrame.sum(outer_residuals**2))
sumInnerResid = pd.DataFrame.sum(
pd.DataFrame.sum(inner_residuals**2))
divFun = sumOuterResid + sumInnerResid
return residuals, outer_residuals, inner_residuals, divFun
def srmr(self):
srmr = (self.empirical() - self.implied())
srmr = np.sqrt(((srmr.values) ** 2).mean())
return srmr
def implied(self):
corLVs = pd.DataFrame.cov(self.fscores)
implied_ = pd.DataFrame.dot(self.outer_loadings, corLVs)
implied = pd.DataFrame.dot(implied_, self.outer_loadings.T)
implied.values[[np.arange(len(self.manifests))] * 2] = 1
return implied
def empirical(self):
empirical = self.data_
return pd.DataFrame.corr(empirical)
def frequency(self, data=None, manifests=None):
if data is None:
data = self.data
if manifests is None:
manifests = self.manifests
frequencia = pd.DataFrame(0, index=range(1, 6), columns=manifests)
for i in range(len(manifests)):
frequencia[manifests[i]] = data[
manifests[i]].value_counts()
frequencia = frequencia / len(data) * 100
frequencia = frequencia.reindex_axis(
sorted(frequencia.columns), axis=1)
frequencia = frequencia.fillna(0).T
frequencia = frequencia[(frequencia.T != 0).any()]
maximo = pd.DataFrame.max(pd.DataFrame.max(data, axis=0))
if int(maximo) & 1:
neg = np.sum(frequencia.ix[:, 1: ((maximo - 1) / 2)], axis=1)
ind = frequencia.ix[:, ((maximo + 1) / 2)]
pos = np.sum(
frequencia.ix[:, (((maximo + 1) / 2) + 1):maximo], axis=1)
else:
neg = np.sum(frequencia.ix[:, 1:((maximo) / 2)], axis=1)
ind = 0
pos = np.sum(frequencia.ix[:, (((maximo) / 2) + 1):maximo], axis=1)
frequencia['Neg.'] = pd.Series(
neg, index=frequencia.index)
frequencia['Ind.'] = pd.Series(
ind, index=frequencia.index)
frequencia['Pos.'] = pd.Series(
pos, index=frequencia.index)
return frequencia
def frequencyPlot(self, data_, SEM=None):
segmento = 'SEM'
SEMmax = pd.DataFrame.max(SEM)
ok = None
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block = pd.concat([block, SEM], axis=1)
for j in range(SEMmax + 1):
dataSEM = (block.loc[data_[segmento] == j]
).drop(segmento, axis=1)
block_val = dataSEM.columns.values
dataSEM = self.frequency(dataSEM, block_val)['Pos.']
dataSEM = dataSEM.rename(j + 1)
ok = dataSEM if ok is None else pd.concat(
[ok, dataSEM], axis=1)
for i in range(1, self.lenlatent):
block = data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
plotando = ok.ix[block_val].dropna(axis=1)
plotando.plot.bar()
plt.legend(loc='upper center',
bbox_to_anchor=(0.5, -.08), ncol=6)
plt.savefig('imgs/frequency' + self.latent[i], bbox_inches='tight')
plt.clf()
plt.cla()
# plt.show()
# block.plot.bar()
# plt.show()
'''for i in range(1, self.lenlatent):
block = self.data[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
block_val = block.columns.values
block = self.frequency(block, block_val)
block.plot.bar()
plt.show()'''
def dataInfo(self):
sd_ = np.std(self.data, 0)
mean_ = np.mean(self.data, 0)
skew = scipy.stats.skew(self.data)
kurtosis = scipy.stats.kurtosis(self.data)
w = [scipy.stats.shapiro(self.data.ix[:, i])[0]
for i in range(len(self.data.columns))]
return [mean_, sd_, skew, kurtosis, w]
def predict(self, method='redundancy'):
exoVar = []
endoVar = []
for i in range(self.lenlatent):
if(self.latent[i] in self.LVariables['target'].values):
endoVar.append(self.latent[i])
else:
exoVar.append(self.latent[i])
if (method == 'exogenous'):
Beta = self.path_matrix.ix[endoVar][endoVar]
Gamma = self.path_matrix.ix[endoVar][exoVar]
beta = [1 if (self.latent[i] in exoVar)
else 0 for i in range(self.lenlatent)]
beta = np.diag(beta)
beta_ = [1 for i in range(len(Beta))]
beta_ = np.diag(beta_)
beta = pd.DataFrame(beta, index=self.latent, columns=self.latent)
mid = pd.DataFrame.dot(Gamma.T, np.linalg.inv(beta_ - Beta.T))
mid = (mid.T.values).flatten('F')
k = 0
for j in range(len(exoVar)):
for i in range(len(endoVar)):
beta.ix[endoVar[i], exoVar[j]] = mid[k]
k += 1
elif (method == 'redundancy'):
beta = self.path_matrix.copy()
beta_ = pd.DataFrame(1, index=np.arange(
len(exoVar)), columns=np.arange(len(exoVar)))
beta.ix[exoVar, exoVar] = np.diag(np.diag(beta_.values))
elif (method == 'communality'):
beta = np.diag(np.ones(len(self.path_matrix)))
beta = pd.DataFrame(beta)
partial_ = pd.DataFrame.dot(self.outer_weights, beta.T.values)
prediction = pd.DataFrame.dot(partial_, self.outer_loadings.T.values)
predicted = pd.DataFrame.dot(self.data, prediction)
predicted.columns = self.manifests
mean_ = np.mean(self.data, 0)
intercept = mean_ - np.dot(mean_, prediction)
predictedData = predicted.apply(lambda row: row + intercept, axis=1)
return predictedData
def cr(self):
# Composite Reliability
composite = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
cor_mat = np.cov(block.T)
evals, evecs = np.linalg.eig(cor_mat)
U, S, V = np.linalg.svd(cor_mat, full_matrices=False)
indices = np.argsort(evals)
indices = indices[::-1]
evecs = evecs[:, indices]
evals = evals[indices]
loadings = V[0, :] * np.sqrt(evals[0])
numerador = np.sum(abs(loadings))**2
denominador = numerador + (p - np.sum(loadings ** 2))
cr = numerador / denominador
composite[self.latent[i]] = cr
else:
composite[self.latent[i]] = 1
composite = composite.T
return(composite)
def r2adjusted(self):
n = len(self.data_)
r2 = self.r2.values
r2adjusted = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
p = sum(self.LVariables['target'] == self.latent[i])
r2adjusted[self.latent[i]] = r2[i] - \
(p * (1 - r2[i])) / (n - p - 1)
return r2adjusted.T
def htmt(self):
htmt_ = pd.DataFrame(pd.DataFrame.corr(self.data_),
index=self.manifests, columns=self.manifests)
mean = []
allBlocks = []
for i in range(self.lenlatent):
block_ = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
allBlocks.append(list(block_.values))
block = htmt_.ix[block_, block_]
mean_ = (block - np.diag(np.diag(block))).values
mean_[mean_ == 0] = np.nan
mean.append(np.nanmean(mean_))
comb = [[k, j] for k in range(self.lenlatent)
for j in range(self.lenlatent)]
comb_ = [(np.sqrt(mean[comb[i][1]] * mean[comb[i][0]]))
for i in range(self.lenlatent ** 2)]
comb__ = []
for i in range(self.lenlatent ** 2):
block = (htmt_.ix[allBlocks[comb[i][1]],
allBlocks[comb[i][0]]]).values
# block[block == 1] = np.nan
comb__.append(np.nanmean(block))
htmt__ = np.divide(comb__, comb_)
where_are_NaNs = np.isnan(htmt__)
htmt__[where_are_NaNs] = 0
htmt = pd.DataFrame(np.tril(htmt__.reshape(
(self.lenlatent, self.lenlatent)), k=-1), index=self.latent, columns=self.latent)
return htmt
def comunalidades(self):
        # Communalities (squared outer loadings)
return self.outer_loadings**2
def AVE(self):
# AVE
return self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
def fornell(self):
cor_ = pd.DataFrame.corr(self.fscores)**2
AVE = self.comunalidades().apply(lambda column: column.sum() / (column != 0).sum())
for i in range(len(cor_)):
cor_.ix[i, i] = AVE[i]
return(cor_)
def rhoA(self):
# rhoA
rhoA = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
S = self.data_[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
S = pd.DataFrame.dot(S.T, S) / S.shape[0]
numerador = (
np.dot(np.dot(weights.T, (S - np.diag(np.diag(S)))), weights))
denominador = (
(np.dot(np.dot(weights.T, (result_ - np.diag(np.diag(result_)))), weights)))
rhoA_ = ((result)**2) * (numerador / denominador)
if(np.isnan(rhoA_.values)):
rhoA[self.latent[i]] = 1
else:
rhoA[self.latent[i]] = rhoA_.values
return rhoA.T
def xloads(self):
# Xloadings
A = self.data_.transpose().values
B = self.fscores.transpose().values
A_mA = A - A.mean(1)[:, None]
B_mB = B - B.mean(1)[:, None]
ssA = (A_mA**2).sum(1)
ssB = (B_mB**2).sum(1)
xloads_ = (np.dot(A_mA, B_mB.T) /
np.sqrt(np.dot(ssA[:, None], ssB[None])))
xloads = pd.DataFrame(
xloads_, index=self.manifests, columns=self.latent)
return xloads
def corLVs(self):
# Correlations LVs
corLVs_ = np.tril(pd.DataFrame.corr(self.fscores))
return pd.DataFrame(corLVs_, index=self.latent, columns=self.latent)
def alpha(self):
# Cronbach Alpha
alpha = pd.DataFrame(0, index=np.arange(1), columns=self.latent)
for i in range(self.lenlatent):
block = self.data_[self.Variables['measurement']
[self.Variables['latent'] == self.latent[i]]]
p = len(block.columns)
if(p != 1):
p_ = len(block)
correction = np.sqrt((p_ - 1) / p_)
soma = np.var(np.sum(block, axis=1))
cor_ = pd.DataFrame.corr(block)
denominador = soma * correction**2
numerador = 2 * np.sum(np.tril(cor_) - np.diag(np.diag(cor_)))
alpha_ = (numerador / denominador) * (p / (p - 1))
alpha[self.latent[i]] = alpha_
else:
alpha[self.latent[i]] = 1
return alpha.T
def vif(self):
vif = []
totalmanifests = range(len(self.data_.columns))
for i in range(len(totalmanifests)):
independent = [x for j, x in enumerate(totalmanifests) if j != i]
coef, resid = np.linalg.lstsq(
self.data_.ix[:, independent], self.data_.ix[:, i])[:2]
r2 = 1 - resid / \
(self.data_.ix[:, i].size * self.data_.ix[:, i].var())
vif.append(1 / (1 - r2))
vif = pd.DataFrame(vif, index=self.manifests)
return vif
def PLSc(self):
##################################################
# PLSc
rA = self.rhoA()
corFalse = self.corLVs()
for i in range(self.lenlatent):
for j in range(self.lenlatent):
if i == j:
corFalse.ix[i][j] = 1
else:
corFalse.ix[i][j] = corFalse.ix[i][
j] / np.sqrt(rA.ix[self.latent[i]] * rA.ix[self.latent[j]])
corTrue = np.zeros([self.lenlatent, self.lenlatent])
for i in range(self.lenlatent):
for j in range(self.lenlatent):
corTrue[j][i] = corFalse.ix[i][j]
corTrue[i][j] = corFalse.ix[i][j]
corTrue = pd.DataFrame(corTrue, corFalse.columns, corFalse.index)
# Loadings
attenuedOuter_loadings = pd.DataFrame(
0, index=self.manifests, columns=self.latent)
for i in range(self.lenlatent):
weights = pd.DataFrame(self.outer_weights[self.latent[i]])
weights = weights[(weights.T != 0).any()]
result = pd.DataFrame.dot(weights.T, weights)
result_ = pd.DataFrame.dot(weights, weights.T)
newLoad = (
weights.values * np.sqrt(rA.ix[self.latent[i]].values)) / (result.values)
myindex = self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]
myindex_ = self.latent[i]
attenuedOuter_loadings.ix[myindex.values, myindex_] = newLoad
# Path
dependent = np.unique(self.LVariables.ix[:, 'target'])
for i in range(len(dependent)):
independent = self.LVariables[self.LVariables.ix[
:, "target"] == dependent[i]]["source"]
dependent_ = corTrue.ix[dependent[i], independent]
independent_ = corTrue.ix[independent, independent]
# path = np.dot(np.linalg.inv(independent_),dependent_)
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
self.path_matrix.ix[dependent[i], independent] = coef
return attenuedOuter_loadings
# End PLSc
##################################################
def __init__(self, dados, LVcsv, Mcsv, scheme='path', regression='ols', h=0, maximo=300,
stopCrit=7, HOC='false', disattenuate='false', method='lohmoller'):
self.data = dados
self.LVcsv = LVcsv
self.Mcsv = Mcsv
self.maximo = maximo
self.stopCriterion = stopCrit
self.h = h
self.scheme = scheme
self.regression = regression
self.disattenuate = disattenuate
contador = 0
self.convergiu = 0
data = dados if type(
dados) is pd.core.frame.DataFrame else pd.read_csv(dados)
LVariables = pd.read_csv(LVcsv)
Variables = Mcsv if type(
Mcsv) is pd.core.frame.DataFrame else pd.read_csv(Mcsv)
latent_ = LVariables.values.flatten('F')
latent__ = np.unique(latent_, return_index=True)[1]
# latent = np.unique(latent_)
latent = [latent_[i] for i in sorted(latent__)]
self.lenlatent = len(latent)
# Repeating indicators
if (HOC == 'true'):
data_temp = pd.DataFrame()
for i in range(self.lenlatent):
block = self.data[Variables['measurement']
[Variables['latent'] == latent[i]]]
block = block.columns.values
data_temp = pd.concat(
[data_temp, data[block]], axis=1)
cols = list(data_temp.columns)
counts = Counter(cols)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
cols[cols.index(s)] = s + '.' + str(suffix)
data_temp.columns = cols
doublemanifests = list(Variables['measurement'].values)
counts = Counter(doublemanifests)
for s, num in counts.items():
if num > 1:
for suffix in range(1, num + 1):
doublemanifests[doublemanifests.index(
s)] = s + '.' + str(suffix)
Variables['measurement'] = doublemanifests
data = data_temp
# End data manipulation
manifests_ = Variables['measurement'].values.flatten('F')
manifests__ = np.unique(manifests_, return_index=True)[1]
manifests = [manifests_[i] for i in sorted(manifests__)]
self.manifests = manifests
self.latent = latent
self.Variables = Variables
self.LVariables = LVariables
data = data[manifests]
data_ = self.normaliza(data)
self.data = data
self.data_ = data_
outer_weights = pd.DataFrame(0, index=manifests, columns=latent)
for i in range(len(Variables)):
outer_weights[Variables['latent'][i]][
Variables['measurement'][i]] = 1
inner_paths = pd.DataFrame(0, index=latent, columns=latent)
for i in range(len(LVariables)):
inner_paths[LVariables['source'][i]][LVariables['target'][i]] = 1
path_matrix = inner_paths.copy()
if method == 'wold':
fscores = pd.DataFrame.dot(data_, outer_weights)
intera = self.lenlatent
intera_ = 1
# LOOP
for iterations in range(0, self.maximo):
contador = contador + 1
if method == 'lohmoller':
fscores = pd.DataFrame.dot(data_, outer_weights)
intera = 1
intera_ = self.lenlatent
# fscores = self.normaliza(fscores) # Old Mode A
for q in range(intera):
# Schemes
if (scheme == 'path'):
for h in range(intera_):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (sum(follow) > 0):
# i ~ follow
inner_paths.ix[inner_paths[follow].index, i] = np.linalg.lstsq(
fscores.ix[:, follow], fscores.ix[:, i])[0]
predec = (path_matrix.ix[:, i] == 1)
if (sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(len(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'fuzzy'):
for h in range(len(path_matrix)):
i = h if method == 'lohmoller' else q
follow = (path_matrix.ix[i, :] == 1)
if (sum(follow) > 0):
ac, awL, awR = otimiza(fscores.ix[:, i], fscores.ix[
:, follow], len(fscores.ix[:, follow].columns), 0)
inner_paths.ix[inner_paths[follow].index, i] = ac
predec = (path_matrix.ix[:, i] == 1)
if (sum(predec) > 0):
semi = fscores.ix[:, predec]
a_ = list(fscores.ix[:, i])
cor = [sp.stats.pearsonr(a_, list(semi.ix[:, j].values.flatten()))[
0] for j in range(len(semi.columns))]
inner_paths.ix[inner_paths[predec].index, i] = cor
elif (scheme == 'centroid'):
inner_paths = np.sign(pd.DataFrame.multiply(
pd.DataFrame.corr(fscores), (path_matrix + path_matrix.T)))
elif (scheme == 'factor'):
inner_paths = pd.DataFrame.multiply(
pd.DataFrame.corr(fscores), (path_matrix + path_matrix.T))
elif (scheme == 'horst'):
inner_paths = inner_paths
print(inner_paths)
if method == 'wold':
fscores[self.latent[q]] = pd.DataFrame.dot(
fscores, inner_paths)
elif method == 'lohmoller':
fscores = pd.DataFrame.dot(fscores, inner_paths)
last_outer_weights = outer_weights.copy()
# Outer Weights
for i in range(self.lenlatent):
                # Reflective / Mode A
if(Variables['mode'][Variables['latent'] == latent[i]]).any() == "A":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
# 1/N (Z dot X)
res_ = (1 / len(data_)) * np.dot(b, a)
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / np.std(res_) # New Mode A
                # Formative / Mode B
elif(Variables['mode'][Variables['latent'] == latent[i]]).any() == "B":
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
# (X'X)^-1 X'Y
a_ = np.dot(a.T, a)
inv_ = np.linalg.inv(a_)
res_ = np.dot(np.dot(inv_, a.T),
fscores.ix[:, latent[i]])
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_weights.ix[myindex.values,
myindex_] = res_ / (np.std(np.dot(data_.ix[:, myindex], res_)))
if method == 'wold':
fscores = pd.DataFrame.dot(fscores, inner_paths)
diff_ = np.max(
np.max((abs(last_outer_weights) - abs(outer_weights))**2))
if (diff_ < (10**(-(self.stopCriterion)))):
self.convergiu = 1
break
# END LOOP
# print(contador)
        # Bootstrapping trick
if(np.isnan(outer_weights).any().any()):
self.convergiu = 0
return None
# Standardize Outer Weights (w / || scores ||)
divide_ = np.diag(1 / (np.std(np.dot(data_, outer_weights), 0)
* np.sqrt((len(data_) - 1) / len(data_))))
outer_weights = np.dot(outer_weights, divide_)
outer_weights = pd.DataFrame(
outer_weights, index=manifests, columns=latent)
fscores = pd.DataFrame.dot(data_, outer_weights)
# Outer Loadings
outer_loadings = pd.DataFrame(0, index=manifests, columns=latent)
for i in range(self.lenlatent):
a = data_[Variables['measurement'][
Variables['latent'] == latent[i]]]
b = fscores.ix[:, latent[i]]
cor_ = [sp.stats.pearsonr(a.ix[:, j], b)[0]
for j in range(len(a.columns))]
myindex = Variables['measurement'][
Variables['latent'] == latent[i]]
myindex_ = latent[i]
outer_loadings.ix[myindex.values, myindex_] = cor_
# Paths
if (regression == 'fuzzy'):
path_matrix_low = path_matrix.copy()
path_matrix_high = path_matrix.copy()
path_matrix_range = path_matrix.copy()
r2 = pd.DataFrame(0, index=np.arange(1), columns=latent)
dependent = np.unique(LVariables.ix[:, 'target'])
for i in range(len(dependent)):
independent = LVariables[LVariables.ix[
:, "target"] == dependent[i]]["source"]
dependent_ = fscores.ix[:, dependent[i]]
independent_ = fscores.ix[:, independent]
if (self.regression == 'ols'):
# Path Normal
coef, resid = np.linalg.lstsq(independent_, dependent_)[:2]
# model = sm.OLS(dependent_, independent_)
# results = model.fit()
# print(results.summary())
# r2[dependent[i]] = results.rsquared
r2[dependent[i]] = 1 - resid / \
(dependent_.size * dependent_.var())
path_matrix.ix[dependent[i], independent] = coef
# pvalues.ix[dependent[i], independent] = results.pvalues
elif (self.regression == 'fuzzy'):
size = len(independent_.columns)
ac, awL, awR = otimiza(dependent_, independent_, size, self.h)
# plotaIC(dependent_, independent_, size)
ac, awL, awR = (ac[0], awL[0], awR[0]) if (
size == 1) else (ac, awL, awR)
path_matrix.ix[dependent[i], independent] = ac
path_matrix_low.ix[dependent[i], independent] = awL
path_matrix_high.ix[dependent[i], independent] = awR
# Matrix Fuzzy
for i in range(len(path_matrix.columns)):
for j in range(len(path_matrix.columns)):
path_matrix_range.ix[i, j] = str(round(
path_matrix_low.ix[i, j], 3)) + ' ; ' + str(round(path_matrix_high.ix[i, j], 3))
r2 = r2.T
self.path_matrix = path_matrix
self.outer_weights = outer_weights
self.fscores = fscores
#################################
# PLSc
if disattenuate == 'true':
outer_loadings = self.PLSc()
##################################
# Path Effects
indirect_effects = pd.DataFrame(0, index=latent, columns=latent)
path_effects = [None] * self.lenlatent
path_effects[0] = self.path_matrix
for i in range(1, self.lenlatent):
path_effects[i] = pd.DataFrame.dot(
path_effects[i - 1], self.path_matrix)
for i in range(1, len(path_effects)):
indirect_effects = indirect_effects + path_effects[i]
total_effects = indirect_effects + self.path_matrix
if (regression == 'fuzzy'):
self.path_matrix_high = path_matrix_high
self.path_matrix_low = path_matrix_low
self.path_matrix_range = path_matrix_range
self.total_effects = total_effects.T
self.indirect_effects = indirect_effects
self.outer_loadings = outer_loadings
self.contador = contador
self.r2 = r2
def impa(self):
# Unstandardized Scores
scale_ = np.std(self.data, 0)
outer_weights_ = pd.DataFrame.divide(
self.outer_weights, scale_, axis=0)
relativo = pd.DataFrame.sum(outer_weights_, axis=0)
        for i in range(len(outer_weights_)):
            for j in range(len(outer_weights_.columns)):
                outer_weights_.iloc[i, j] = (
                    outer_weights_.iloc[i, j]) / relativo[j]
unstandardizedScores = pd.DataFrame.dot(self.data, outer_weights_)
# Rescaled Scores
rescaledScores = pd.DataFrame(0, index=range(
len(self.data)), columns=self.latent)
for i in range(self.lenlatent):
block = self.data[self.Variables['measurement'][
self.Variables['latent'] == self.latent[i]]]
maximo = pd.DataFrame.max(block, axis=0)
minimo = pd.DataFrame.min(block, axis=0)
minimo_ = pd.DataFrame.min(minimo)
maximo_ = pd.DataFrame.max(maximo)
rescaledScores[self.latent[
i]] = 100 * (unstandardizedScores[self.latent[i]] - minimo_) / (maximo_ - minimo_)
# Manifests Indirect Effects
manifestsIndEffects = pd.DataFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
effect_ = pd.DataFrame(
self.outer_weights, index=self.manifests, columns=self.latent)
        for i in range(self.lenlatent):  # accumulate effects over successive path powers
effect_ = pd.DataFrame.dot(effect_, self.path_matrix.T)
manifestsIndEffects = manifestsIndEffects + effect_
        # Performance Scores LV
performanceScoresLV = pd.DataFrame.mean(rescaledScores, axis=0)
# Performance Manifests
maximo = pd.DataFrame.max(self.data, axis=0)
minimo = | pd.DataFrame.min(self.data, axis=0) | pandas.DataFrame.min |
from typing import List, Union, Dict, Any, Tuple
import os
import json
from glob import glob
from dataclasses import dataclass
import functools
import argparse
from sklearn import metrics
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.metrics import precision_recall_fscore_support
from datasets import ClassLabel, load_dataset, load_metric
from utils import *
from dataset_configs import *
tqdm.pandas() # enable progress_apply
def flatten_dataset_to_table(dataset) -> pd.DataFrame:
"""Convert the HF Dataset to a Pandas DataFrame"""
results = []
for e_id, example in enumerate(tqdm(dataset)):
cur_len = len(example["words"])
results.extend(
[
[
e_id,
i,
example["words"][i],
example["labels"][i],
example["block_ids"][i],
example["line_ids"][i],
]
for i in range(cur_len)
]
)
return pd.DataFrame(
results,
columns=["sample_id", "word_id", "word", "label", "block_id", "line_id"],
)
def load_dataset_and_flatten(dataset_path) -> pd.DataFrame:
if os.path.exists(dataset_path.replace(".json", ".cached.csv")):
return pd.read_csv(dataset_path.replace(".json", ".cached.csv"))
else:
dataset = load_dataset("json", data_files=dataset_path, field="data")
df = flatten_dataset_to_table(dataset["train"])
df.to_csv(dataset_path.replace(".json", ".cached.csv"), index=None)
return df
def _preprocess_prediction_table(
test_df, pred_df, most_frequent_category=None, label_mapping: Dict = None
) -> pd.DataFrame:
"""Merge the prediction table with the original gt table
to 1) fetch the gt and 2) insert some "un-tokenized" tokens
"""
merged_df = test_df.merge(
pred_df.loc[:, ["sample_id", "word_id", "pred"]],
how="outer",
on=["sample_id", "word_id"],
)
if label_mapping is not None:
merged_df["pred"] = merged_df["pred"].map(label_mapping)
if most_frequent_category is None:
most_frequent_category = test_df["label"].value_counts().index[0]
merged_df["pred"] = merged_df["pred"].fillna(
most_frequent_category
) # fill in the most frequent category
return merged_df
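

# Illustrative sketch (not part of the original pipeline): how the outer merge
# above behaves on toy tables. Tokens present in the ground truth but missing
# from the prediction table get NaN predictions, which are then filled with the
# most frequent gold label. The column names mirror the real tables; the values
# are made up.
def _example_preprocess_prediction_table():
    test_df = pd.DataFrame(
        {"sample_id": [0, 0, 0], "word_id": [0, 1, 2], "label": [1, 1, 2]}
    )
    pred_df = pd.DataFrame({"sample_id": [0, 0], "word_id": [0, 2], "pred": [1, 2]})
    merged = _preprocess_prediction_table(test_df, pred_df)
    return merged  # the row for word_id=1 has its pred filled with the most frequent label
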
@dataclass
class ModelConfig:
task_name: str = ""
model_name: str = ""
variant: str = ""
def put_model_config_at_the_first(func):
@functools.wraps(func)
def wrap(self, *args, **kwargs):
df = func(self, *args, **kwargs)
columns = df.columns
return df[["task_name", "model_name", "variant"] + list(columns[:-3])]
return wrap
class SingleModelPrediction:
"""Methods for processing the "test_predictions" tables for an individual model"""
def __init__(
self,
df,
label_space,
model_config: ModelConfig,
gt_name="label",
pred_name="pred",
used_metric="entropy",
):
self.df = df
self.label_space = label_space
self.gt_name = gt_name
self.pred_name = pred_name
self.model_config = model_config
self.used_metric = used_metric
@classmethod
def from_raw_prediction_table(
cls,
test_df,
pred_df,
label_space,
model_config,
most_frequent_category: int = None,
label_mapping=None,
used_metric="entropy",
**kwargs,
):
merged_df = _preprocess_prediction_table(
test_df,
pred_df,
most_frequent_category,
label_mapping=label_mapping,
)
return cls(
merged_df, label_space, model_config, used_metric=used_metric, **kwargs
)
def groupby(self, level):
assert level in ["block", "line"]
return self.df.groupby(["sample_id", f"{level}_id"])
def calculate_per_category_scores(self):
_scores = precision_recall_fscore_support(
self.df[self.gt_name],
self.df[self.pred_name],
labels=self.label_space,
zero_division=0,
)
_scores = pd.DataFrame(
_scores,
columns=self.label_space,
index=["precision", "recall", "f-score", "support"],
)
return _scores
def calculate_accuracy_for_group(self, gp, score_average="micro"):
accuracy = (gp[self.gt_name] == gp[self.pred_name]).mean()
precision, recall, fscore, _ = precision_recall_fscore_support(
gp[self.gt_name],
gp[self.pred_name],
average=score_average,
labels=self.label_space,
zero_division=0,
)
return {
"accuracy": accuracy,
"precision": precision,
"recall": recall,
"fscore": fscore,
}
def calculate_gini_score_for_group(self, gp):
cts = gp[self.pred_name].value_counts()
if len(cts) == 1:
return 0
else:
return 1 - ((cts / cts.sum()) ** 2).sum()
def calculate_entropy_for_group(self, gp):
cts = gp[self.pred_name].value_counts()
if len(cts) == 1:
return 0
else:
prob = cts / cts.sum()
entropy = -(prob * np.log2(prob)).sum()
return entropy
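    # Worked example (illustrative, not from the original code): for a block whose
    # predictions are [A, A, B, B], the counts are {A: 2, B: 2}, so
    # gini = 1 - (0.5**2 + 0.5**2) = 0.5 and entropy = -(2 * 0.5 * log2(0.5)) = 1.0;
    # a block predicted as a single class returns 0 for both metrics.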
def create_page_level_accuracy_report(self) -> pd.DataFrame:
return (
self.df.groupby("sample_id")
.apply(self.calculate_accuracy_for_group)
.apply(pd.Series)
)
def create_page_level_gini_report(self, level="block") -> pd.Series:
gini = self.groupby(level=level).apply(self.calculate_gini_score_for_group)
gini = (
gini.to_frame()
.rename(columns={0: "gini"})
.reset_index()
.groupby("sample_id")
.gini.mean()
)
return gini
def create_page_level_entropy_report(self, level="block") -> pd.Series:
entropy = self.groupby(level=level).apply(self.calculate_entropy_for_group)
entropy = (
entropy.to_frame()
.rename(columns={0: "entropy"})
.reset_index()
.groupby("sample_id")
.entropy.mean()
)
return entropy
def create_page_level_ami_report(self) -> pd.DataFrame:
ami = (
self.df.groupby("sample_id")
.apply(
lambda gp: metrics.adjusted_mutual_info_score(
gp[self.gt_name], gp[self.pred_name]
)
)
.to_frame()
.rename(columns={0: "ami"})
)
return ami
def create_page_level_overall_report(self) -> pd.DataFrame:
report = self.create_page_level_accuracy_report()
report["gini"] = self.create_page_level_gini_report()
report["entropy"] = self.create_page_level_entropy_report()
return report
def create_all_page_accuracy_report(self) -> pd.Series:
return pd.Series(
self.calculate_accuracy_for_group(self.df, score_average="macro")
)
def create_all_page_ami_report(self) -> pd.Series:
return pd.Series(self.create_page_level_ami_report().mean())
def create_all_page_gini_report(self, level="block") -> pd.Series:
gini = self.create_page_level_gini_report(level=level)
report = pd.Series(
{
f"gini_{level}_average": gini.mean(),
f"gini_{level}_std": gini.std(),
f"gini_{level}_nonzero": gini[gini > 0].count(),
}
)
return report
def create_all_page_entropy_report(self, level="block") -> pd.Series:
entropy = self.create_page_level_entropy_report(level=level)
report = pd.Series(
{
f"entropy_{level}_average": entropy.mean(),
f"entropy_{level}_std": entropy.std(),
f"entropy_{level}_nonzero": entropy[entropy > 0].count(),
}
)
return report
def create_all_page_overall_report(self, add_line_level_gini=False) -> pd.Series:
report = self.create_all_page_accuracy_report()
        if self.used_metric == "gini":
            gini = self.create_all_page_gini_report()
            if add_line_level_gini:
                gini = pd.concat(
                    [gini, self.create_all_page_gini_report(level="line")]
                )
            report = pd.concat([report, gini])
        elif self.used_metric == "entropy":
            entropy = self.create_all_page_entropy_report()
            if add_line_level_gini:
                entropy = pd.concat(
                    [entropy, self.create_all_page_entropy_report(level="line")]
                )
            report = pd.concat([report, entropy])
        report = pd.concat([report, self.create_all_page_ami_report()])
return report
def majority_voting_postprocessing(self, level) -> "SingleModelPrediction":
"""This method attempts to use majority voting for model predictions within each
group (level) to improve the accuracy. It will firstly use groupby the elements
within each group, then find the most common class in the predicted categoires,
and replace the others as the predicted category.
"""
# It might take a while
df = (
self.groupby(level=level)
.progress_apply(
lambda gp: gp.assign(pred=gp[self.pred_name].value_counts().index[0])
)
.reset_index(drop=True)
)
return self.__class__(
df,
**{key: getattr(self, key) for key in self.__dict__.keys() if key != "df"},
)
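

# Minimal usage sketch (illustrative only; the paths and the label space are
# placeholders, not taken from the original repository).
def _example_single_model_prediction():
    test_df = load_dataset_and_flatten("data/test.json")  # hypothetical path
    pred_df = pd.read_csv("checkpoints/model/test_predictions.csv")  # hypothetical path
    prediction = SingleModelPrediction.from_raw_prediction_table(
        test_df,
        pred_df,
        label_space=[0, 1, 2, 3],  # placeholder label ids
        model_config=ModelConfig(task_name="demo", model_name="model"),
    )
    # smooth the predictions block-wise, then compute the aggregate report
    smoothed = prediction.majority_voting_postprocessing(level="block")
    return smoothed.create_all_page_overall_report()
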
@dataclass
class MultiModelPrediction:
"""Methods for processing the "test_predictions" tables for multiple models
within a Experiment
"""
predictions: List[SingleModelPrediction]
name: str
def create_per_category_report(self) -> pd.DataFrame:
reports = []
for prediction in self.predictions:
report = prediction.calculate_per_category_scores()
report["task_name"] = prediction.model_config.task_name
report["model_name"] = prediction.model_config.model_name
report["variant"] = prediction.model_config.variant
reports.append(report)
return pd.concat(reports)
@put_model_config_at_the_first
def create_overall_report(self) -> pd.DataFrame:
reports = []
for prediction in self.predictions:
report = prediction.create_all_page_overall_report()
report["task_name"] = prediction.model_config.task_name
report["model_name"] = prediction.model_config.model_name
report["variant"] = prediction.model_config.variant
reports.append(report)
return pd.DataFrame(reports)
@put_model_config_at_the_first
def create_overall_report_with_majority_voting_postprocessing(
self, level
) -> pd.DataFrame:
reports = []
for prediction in self.predictions:
report = prediction.majority_voting_postprocessing(
level=level
).create_all_page_overall_report()
report["task_name"] = prediction.model_config.task_name
report["model_name"] = prediction.model_config.model_name
report["variant"] = prediction.model_config.variant
reports.append(report)
return pd.DataFrame(reports)
@classmethod
def from_experiment_folder(
cls,
experiment_folder,
test_df,
label_space,
experiment_name=None,
most_frequent_category=None,
label_mapping=None,
used_metric="entropy",
prediction_filename="test_predictions.csv",
):
if experiment_name is None:
experiment_name = os.path.basename(experiment_folder)
predictions = []
model_names = glob(f"{experiment_folder}/*")
for model_name in tqdm(model_names):
try:
df = pd.read_csv(f"{model_name}/{prediction_filename}")
model_name = os.path.basename(model_name)
model_config = ModelConfig(
task_name=experiment_name,
model_name=model_name,
)
predictions.append(
SingleModelPrediction.from_raw_prediction_table(
test_df=test_df,
pred_df=df,
label_space=label_space,
model_config=model_config,
most_frequent_category=most_frequent_category,
label_mapping=label_mapping,
used_metric=used_metric,
)
)
            except Exception as e:
                print(f"Error loading for {model_name} in MultiModelPrediction: {e}")
return cls(predictions, experiment_name)
class SingleModelRecord:
"""Methods for processing training records for a single model"""
def __init__(
self,
model_folder,
model_config,
trainer_states_name="trainer_state.json",
all_results_name="all_results.json",
training_args_name="training_args.bin",
):
self.model_config = model_config
self.trainer_states = load_json(f"{model_folder}/{trainer_states_name}")
self.all_results = load_json(f"{model_folder}/{all_results_name}")
self.training_args = torch.load(
f"{model_folder}/{training_args_name}"
).to_dict()
def load_acc_history(self) -> pd.DataFrame:
cur_report = []
for ele in self.trainer_states["log_history"]:
cur_report.append(
[
ele["step"],
ele["epoch"],
ele.get("eval_fscore"),
ele.get("eval_accuracy"),
]
)
df = pd.DataFrame(cur_report, columns=["step", "epoch", "f1-score", "acc"])
return df
def load_loss_history(self) -> pd.DataFrame:
cur_report = []
for ele in self.trainer_states["log_history"]:
if "loss" in ele:
cur_report.append([ele["step"], ele["epoch"], ele["loss"]])
df = pd.DataFrame(cur_report, columns=["step", "epoch", "loss"])
return df
def load_train_history(self) -> pd.DataFrame:
acc_record = self.load_acc_history()
loss_record = self.load_loss_history()
merged = acc_record.merge(loss_record, how="outer")
return merged
def load_computation_record(self) -> pd.Series:
return pd.Series(
{
"gpus": self.training_args["_n_gpu"],
"batch_size": self.training_args["per_device_train_batch_size"],
"epochs": self.training_args["num_train_epochs"],
"learning_rate": self.training_args["learning_rate"],
"warmup_steps": self.training_args["warmup_steps"],
"train_samples": self.all_results["train_samples"],
"train_flos": self.trainer_states["total_flos"],
"train_steps": self.trainer_states["max_steps"],
"train_runtime": self.all_results["train_runtime"],
"eval_runtime": self.all_results["eval_runtime"],
"eval_samples": self.all_results["eval_samples"],
"eval_samples_per_second": self.all_results["eval_samples_per_second"],
"eval_fscore": self.all_results["eval_fscore"],
}
)
@dataclass
class MultiModelRecord:
records: List[SingleModelRecord]
name: str
@put_model_config_at_the_first
def load_train_history(self) -> pd.DataFrame:
reports = []
for record in self.records:
report = record.load_train_history()
report["task_name"] = record.model_config.task_name
report["model_name"] = record.model_config.model_name
report["variant"] = record.model_config.variant
reports.append(report)
return pd.concat(reports)
@put_model_config_at_the_first
def load_computation_record(self) -> pd.DataFrame:
reports = []
for record in self.records:
report = record.load_computation_record()
report["task_name"] = record.model_config.task_name
report["model_name"] = record.model_config.model_name
report["variant"] = record.model_config.variant
reports.append(report)
return pd.DataFrame(reports)
@classmethod
def from_experiment_folder(
cls,
experiment_folder,
experiment_name=None,
trainer_states_name="trainer_state.json",
all_results_name="all_results.json",
training_args_name="training_args.bin",
):
if experiment_name is None:
experiment_name = os.path.basename(experiment_folder)
records = []
model_names = glob(f"{experiment_folder}/*")
for model_name in tqdm(model_names):
try:
model_config = ModelConfig(
task_name=experiment_name, model_name=os.path.basename(model_name)
)
records.append(
SingleModelRecord(
model_name,
model_config,
trainer_states_name=trainer_states_name,
all_results_name=all_results_name,
training_args_name=training_args_name,
)
)
            except Exception as e:
                print(f"Error loading for {model_name}: {e}")
return cls(records, experiment_name)
@dataclass
class CombinedReport:
records: MultiModelRecord
predictions: MultiModelPrediction
    def report(self, with_majority_voting=True) -> pd.DataFrame:
computational_report = self.records.load_computation_record()
scores_report = self.predictions.create_overall_report()
if not with_majority_voting:
return computational_report.merge(
scores_report, on=["task_name", "model_name", "variant"]
).set_index(["task_name", "model_name", "variant"])
else:
scores_report_with_majority_voting = self.predictions.create_overall_report_with_majority_voting_postprocessing(
level="block"
)
return (
computational_report.merge(
scores_report, on=["task_name", "model_name", "variant"]
)
.merge(
scores_report_with_majority_voting,
on=["task_name", "model_name", "variant"],
suffixes=("", "_majority_voting"),
)
.set_index(["task_name", "model_name", "variant"])
)
def report_per_category_scores(
self, column_names: Union[List, Dict] = None
) -> pd.DataFrame:
scores_report = self.predictions.create_per_category_report()
if column_names is not None:
if isinstance(column_names, list):
scores_report.columns = column_names
elif isinstance(column_names, dict):
scores_report.columns = [
column_names.get(col, col) for col in scores_report.columns
]
return scores_report.reset_index().set_index(
["task_name", "model_name", "variant", "index"]
)
@classmethod
def from_experiment_folder(
cls,
experiment_folder,
test_df,
label_space,
experiment_name=None,
most_frequent_category=None,
prediction_filename="test_predictions.csv",
trainer_states_name="trainer_state.json",
all_results_name="all_results.json",
training_args_name="training_args.bin",
used_metric="entropy",
):
predictions = MultiModelPrediction.from_experiment_folder(
experiment_folder,
test_df,
label_space,
experiment_name=experiment_name,
most_frequent_category=most_frequent_category,
prediction_filename=prediction_filename,
used_metric=used_metric,
)
records = MultiModelRecord.from_experiment_folder(
experiment_folder,
experiment_name=experiment_name,
trainer_states_name=trainer_states_name,
all_results_name=all_results_name,
training_args_name=training_args_name,
)
return cls(records, predictions)
def generate_eval_report_for_experiment(experiment_folder, args):
assert os.path.isdir(experiment_folder), f"{experiment_folder} does not exist"
print(f"Working on generating experiment results for {experiment_folder}")
dataset = instiantiate_dataset(args.dataset_name)
test_df = load_dataset_and_flatten(dataset.test_file)
all_labels = load_json(dataset.label_map_file)
label2id = {val:int(key) for key, val in all_labels.items()}
id2label = {int(key):val for key, val in all_labels.items()}
dataset_labels = list(id2label.keys())
print(f"Loading from {experiment_folder}")
report = CombinedReport.from_experiment_folder(
experiment_folder,
test_df=test_df,
label_space=dataset_labels,
)
report_folder = os.path.join(experiment_folder, args.report_folder_name)
os.makedirs(report_folder, exist_ok=True)
report_df = report.report()
report_df.to_csv(os.path.join(report_folder, "report.csv"))
if args.store_per_class:
report_df_per_cat = report.report_per_category_scores()
report_df_per_cat.to_csv(os.path.join(report_folder, "report_per_class.csv"))
return report_df, report_df_per_cat
return report_df, None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', type=str, help='The name of the used dataset')
parser.add_argument('--base_path', default="../checkpoints", help='The checkpoint base path')
parser.add_argument('--experiment_name', default=None, type=str, help='The name of the experiment.')
parser.add_argument('--store_per_class', action='store_true', help='Store per class accuracy scores.')
parser.add_argument('--report_folder_name', default="_reports", help='The name of the folder for saving reports')
args = parser.parse_args()
dataset_path = os.path.join(args.base_path, args.dataset_name.lower())
if args.experiment_name is not None:
experiment_folder = os.path.join(dataset_path, args.experiment_name)
generate_eval_report_for_experiment(experiment_folder, args)
else:
print(f"No experiment_name is specified, iterating all the experiment folders in {args.base_path=}")
all_report_df = []
all_report_df_per_cat = []
for experiment_name in os.listdir(dataset_path):
if not experiment_name.startswith(".") and experiment_name != args.report_folder_name:
experiment_folder = os.path.join(dataset_path, experiment_name)
report_df, report_df_per_cat = generate_eval_report_for_experiment(experiment_folder, args)
all_report_df.append(report_df)
all_report_df_per_cat.append(report_df_per_cat)
all_report_df = | pd.concat(all_report_df) | pandas.concat |
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
# Copyright (C) 2020 <NAME>
# Use of this source code is governed by the MIT License
###############################################################################
from . import config
from .metadata import metadata
from . import linesholder
from . import linesops
from .. import SEED_AVG, SEED_LAST, SEED_SUM, SEED_NONE, SEED_ZERO, SEED_ZFILL
import numpy as np
import pandas as pd
__all__ = ['Line', 'Lines']
def _generate(cls, bases, dct, name='', klass=None, **kwargs):
# If "name" is defined (inputs, outputs) it overrides any previous
    # definition from the base classes.
# An extension can be done by using "name_extend" (inputs_extend) in which
# case the definition will be appended to that of the base classes
# In case of a redefinition, automatic mappings to the existing definitions
# (by index) will be done to ensure "instances" do still work in base
# classes when going the super route
# Manual mappings can also be defined if a definition is a dictionary like
# in:
# outputs = {'atr': 'tr'}
# In this case 'atr' is the new output and the base class had a 'tr' output
    # and now whenever 'tr' is referenced it will point to 'atr'
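    # Illustrative trace (not from the original docs): if a base class defines
    # outputs = ('tr',) and the subclass declares outputs = {'atr': 'tr'}, then
    # clsdefs becomes ('atr',), defmappings == {'atr': 'tr'}, the remapped 'tr'
    # is dropped from the final tuple, and a 'tr' property aliasing 'atr' is
    # generated further below.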
# Get actual lines definition and that of the bases
clsdefs = dct.get(name, ()) # new defs
# support remapping lines in subclasses
cdefs = [] # collect final single new definitions
defmappings = {} # collect any mappings
# one can specify a single input (str) or single remapping (dict)
if isinstance(clsdefs, (dict, str,)):
clsdefs = [clsdefs] # unpacked below
for clsdef in clsdefs:
# if a "line" def contains a list or a tuple, it is expected to have 2
# elements defining a remapping. key=>val where key is the new name and
# value is the old name, defined in the base class. Make it a dict to
# support the general case in which it was already a dict
if isinstance(clsdef, (list, tuple,)):
clsdef = dict([clsdef]) # and go to dict case
if isinstance(clsdef, dict):
cdefs.extend(list(clsdef))
            defmappings.update(clsdef) # store mapping to generate properties
else: # assume str or else detect and raise exception if not
cdefs.append(clsdef)
# After having parsed mappings in dict form, create the actual definition
clsdefs = tuple(cdefs)
# Gather base definitions - needed here to do mappings
lbases = (getattr(base, name, ()) for base in bases)
lbdefs = tuple(ldef for lbase in lbases for ldef in lbase)
if clsdefs: # a new definition was made
final_defs = clsdefs
for clsdef, lbdef in zip(clsdefs, lbdefs): # create automappings
if lbdef in clsdefs: # cannot remap if exists in current defs
continue
defmappings.setdefault(clsdef, lbdef)
else:
# no new definition, see if _extend has been put in place
clsdefs = dct.get(name + '_extend', ()) # new defs
if isinstance(clsdefs, str):
clsdefs = (clsdefs,) # unpacked below
final_defs = lbdefs + clsdefs
# removed remapped lines from definitions
remapped = list(defmappings.values())
# retain last inputs defs - super readable and pythonic one-liner
lines = tuple(reversed(list(dict.fromkeys(reversed(final_defs)))))
lines = tuple(x for x in lines if x not in remapped)
setattr(cls, name, lines) # install all lines defs
# Create base dictionary for subclassing via typ
clsdct = dict(__module__=cls.__module__, __slots__=list(lines))
# Create properties for attribute retrieval of old line
propdct = {}
    for newname, alias in defmappings.items():
        # bind the loop variable through a default argument to avoid the classic
        # late-binding closure bug and to keep the outer "name" argument intact
        def get_alias_to_name(self, name=newname):
            return getattr(self, name)

        def set_alias_to_name(self, value, name=newname):
            setattr(self, name, value)

        propdct[alias] = property(get_alias_to_name, set_alias_to_name)
clsdct.update(propdct) # add properties for alias remapping
clsname = name.capitalize() + cls.__name__ # decide name
return type(clsname, (klass,), clsdct) # subclass and return
def binary_op(name):
def real_binary_op(self, other, *args, **kwargs):
# Executes a binary operation where self is guaranteed to have a
# _series attribute but other isn't. Example > or +
# The minimum period is taken into account to only apply the operation
# to the proper range and store in the result in that range. The rest
# is a bunch of leading 'NaN'
# See if other has a minperiod, else default to 1
minperiod = max(self._minperiod, getattr(other, '_minperiod', 1))
minidx = minperiod - 1 # minperiod is 1-based, easier for location
# Prepare a result filled with 'Nan'
result = pd.Series(np.nan, index=self._series.index)
# Get and prepare the other operand
other = getattr(other, '_series', other) # get real other operand
other = other[minidx:] if isinstance(other, pd.Series) else other
# Get the operation, exec and store
binop = getattr(self._series[minidx:], name) # get op from series
result[minidx:] = r = binop(other, *args, **kwargs) # exec / store
result = result.astype(r.dtype, copy=False)
return self._clone(result, period=minperiod) # ret new obj w minperiod
linesops.install_cls(name=name, attr=real_binary_op)
def standard_op(name, parg=None, sargs=False, skwargs=False):
def real_standard_op(self, *args, **kwargs):
# Prepare a result filled with 'Nan'
result = pd.Series(np.nan, index=self._series.index)
# get the series capped to actual period to consider
a = args if sargs else tuple()
kw = kwargs if skwargs else {}
minperiod, minidx, a, kw = self._minperiodize(*a, **kw)
if sargs:
args = a
if skwargs:
kwargs = kw
# get the operation from a view capped to the max minperiod
stdop = getattr(self._series[minidx:], name)
result[minidx:] = r = stdop(*args, **kwargs) # execute and assign
result = result.astype(r.dtype, copy=False) # keep dtype intact
line = self._clone(result, period=minperiod) # create resulting line
if parg: # consider if the operation increases the minperiod
line._minperiod += kwargs.get(parg)
return line
linesops.install_cls(name=name, attr=real_standard_op)
def reduction_op(name, sargs=False, *args, **kwargs):
def real_reduction_op(self, *args, **kwargs):
if sargs:
_, minidx, args, _ = self._minperiodize(*args)
else:
minidx = self._minperiod - 1
red_op = getattr(self._series[minidx:], name)
return red_op(*args, **kwargs)
linesops.install_cls(name=name, attr=real_reduction_op)
# Below if _ewm is called
#
# - Calculating the p1:p2 range which will be used to calculate the
# single seed value with an arithmetic average (i.e.: "mean")
# The following are true for p1 and p2
# - p1 >= 0
# - p2 >= (p1 + self.p.period)
# - Creating a [0:p2] long seed array filled with NaN
# - Calculating the mean of input[p1:p2] and putting it a p2
# - Concatenating seed array + rest data and storing it at outputs[0],
# (output name is unknown but: subclasses will have an output)
# The parameter "poffset" allows to start the calulation at an offset. This
# is used to replicate the internal ta-lib behavior with ema when
# calculating the fast ema of the macd, where the start of the delivery of
# data is offset to the period of the slow ema.
# For regular usage, poffset is always 0 and plays no role. If poffset
# didn't exist, the calculation of p1 and p2 would simpler
# - p1 = self._minperiod - 1
# - p2 = p1 + self.p.period
#
# but due to poffset the calculation is made backwards
# - poffset = (poffset or period) # assume here poffset > period
# - p2 = self._minperiod - 1 + poffset # seed end calc
# - p1 = p2 - period # beginning of seed calculation
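# Worked example (illustrative numbers): with _minperiod=1, period=5 and no
# poffset/_pearly, p2 = 1 - 1 + 5 - 0 = 5 and p1 = p2 - 5 = 0, so the seed is
# mean(input[0:5]) placed at index p2 - 1 = 4, and the exponential smoothing
# starts delivering values from that bar onwards.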
def multifunc_op(name, parg=None, propertize=False):
class _MultiFunc_Op:
def __init__(self, line, *args, **kwargs):
# plethora of vals needed later in __getattr__/__getitem__
self._is_seeded = False
self._line = line
self._series = series = line._series
self._minperiod = line._minperiod
# if the end user passes alpha=None, it means that the alpha
# calculation for an ewm will be done directy by the caller using
# apply. This can only be achieved if instead of delivering ewm,
# rolling(window=2) is returned (the end user should not do that,
# because the minperiod calculations would be off)
self._alpha_ = None
lsname = name.lstrip('_') # left stripped name (lsname)
# get/pop period related parameter ... as needed for multi-ewm
if lsname == 'ewm':
if 'alpha' in kwargs: # all bets are on 'alpha'
# period cannot be recovered, force the user to specify it
# use a default value of 0 to indicate that the period of
# the calling line has to be used even if alphas carry a
# period. See below the alpha period check against offset
self._pval = kwargs.pop('span', 0)
alpha = kwargs['alpha'] # it is there ...
if isinstance(alpha, (int, float)):
pass # regular behavior
else: # dynamic alpha which can be calc'ed by _mean_
self._alpha_ = alpha
kwargs['alpha'] = 1.0
elif 'halflife' in kwargs:
# period cannot be recovered, force the user to specify it
self._pval = kwargs.pop('span') # exception if not there
elif 'com' in kwargs:
self._pval = kwargs.get('com') + 1 # alpha = 1 / (com + 1)
elif 'span' in kwargs:
# must be, period cannot be infered from alpha/halflife
self._pval = kwargs.get('span') # alpha = 2 / (alpha + 1)
else:
self._pval = kwargs.get(parg)
# set alphaperiod which is needed in the future
self._alpha_p = getattr(self._alpha_, '_minperiod', 1)
# Extra processing if special _ewm
if name == '_ewm': # specific behavior for custom _ewm
# exp smoothing in tech analysis uses 'adjust=False'
kwargs.setdefault('adjust', False) # set if not given
# collect special parameters
self._pearly = _pearly = kwargs.pop('_pearly', 0)
self._poffset = kwargs.pop('_poffset', 0)
self._seed = _seed = kwargs.pop('_seed', SEED_AVG)
# Determine where the actual calculation is offset to. _poffset
                # is there to support the failure made by ta-lib when offsetting
                # the fast ema in the macd. _poffset > _pval
poffset = self._poffset or self._pval
# For a dynamic alpha like in KAMA, the period of the dynamic
# alpha can exceed that of the calculated offset. But ta-lib
                # makes a mistake and calculates that without taking that period
# into account if _seed is activated
# If no pval has been provided (span), don't take the alpha
# period, the period of the calling line will be used
if self._pval and self._alpha_p > poffset:
poffset += self._alpha_p - poffset - 1
p2 = self._minperiod - 1 + poffset - _pearly # seed end calc
p1 = p2 - self._pval # beginning of seed calculation
# beginning of result calculation. Includes the calculated seed
# value which is the 1st value to be returned. Except in KAMA,
# where ta-lib uses the value before that as seed for the
# exponential smoothing calculation
self._minidx = pidx = p2 - 1 # beginning of result calculation
trailprefix = pd.Series(np.nan, index=series.index[pidx:p2])
# Determine the actul seed value to use
if _seed == SEED_AVG:
trailprefix[-1] = series[p1:p2].mean()
elif _seed == SEED_LAST:
trailprefix[-1] = series[pidx]
elif _seed == SEED_SUM:
trailprefix[-1] = series[p1:p2].sum()
elif _seed == SEED_NONE:
pass # no seed wished ... do nothing
elif _seed == SEED_ZERO:
trailprefix[-1] = 0.0
elif _seed == SEED_ZFILL:
trailprefix[:] = 0.0
# complete trailer: prefix (seed at end) + series vals to calc
                trailer = pd.concat([trailprefix, series[p2:]])
else:
self._pearly = 0 # it will be checked in getattr
self._minidx = self._minperiod - 1
trailer = series[self._minidx:]
self._multifunc = getattr(trailer, lsname)(*args, **kwargs)
        def _mean_exp(self, alpha, beta=None): # recursive definition
# alpha => new data, beta => old data (similar to 1-alpha)
if not beta:
beta = 1.0 - alpha
def _sm_acc(x):
prev = x[0]
for i in range(1, len(x)):
x[i] = prev = beta * prev + alpha * x[i]
return x
return self._apply(_sm_acc) # trigger __getattr__ for _apply
        def _lfilter(self, alpha, beta=None): # recursive definition
try:
import scipy.signal
except ImportError: # if not available use tight loop
return self._mean_exp(alpha, beta)
# alpha => new data, beta => old data (similar to 1-alpha)
if not beta:
beta = 1.0 - alpha
def _sp_lfilter(x):
# Initial conditions "ic" can be used for the calculation, the
# next two lines detail that. A simple scaling of x[0] achieves
# the same in the 1-d case
# zi = lfiltic([alpha], [1.0, -beta], y=[x[0]])
# x[1:], _ = lfilter([alpha], [1.0, -beta], x[1:], zi=zi)
x[0] /= alpha # scale start val, descaled in 1st op by alpha
return scipy.signal.lfilter([alpha], [1.0, -beta], x)
return self._apply(_sp_lfilter) # trigger __getattr__ for _apply
def _mean(self): # meant for ewm with dynamic alpha
def _dynalpha(vals):
                # reuse vals: not the original series, it's the trailer above
alphas = self._alpha_[self._alpha_p - 1:] # -1: get array idx
prev = vals[0] # seed value, which isn't part of the result
vals[0] = np.nan # made 1 tick longer to carry seed, nan it
for i, alphai in enumerate(alphas, 1): # tight-loop-calc
vals[i] = prev = prev + alphai * (vals[i] - prev)
return vals # can return vals, made Series via __getattr__
return self._apply(_dynalpha) # triggers __getattr__ for _apply
def __getattr__(self, attr):
if self._pval is not None and not self._is_seeded:
# window operation overlap with the 1st calc point ... -1
self._minperiod += self._pval - self._pearly - 1
# for a dynamic alpha, the period of the alpha can exceed minp
self._minperiod = max(self._minperiod, self._alpha_p)
op = getattr(self._multifunc, attr) # get real op/let exp propag
def call_op(*args, **kwargs): # actual op executor
result = pd.Series(np.nan, index=self._series.index) # prep
sargs = [] # cov takes an "other" parameter for example
for arg in args:
if isinstance(arg, Line):
arg = arg._series[self._minidx:]
sargs.append(arg)
result[self._minidx:] = r = op(*sargs, **kwargs) # run/store
result = result.astype(r.dtype, copy=False)
return self._line._clone(result, period=self._minperiod)
return call_op
def __getitem__(self, item):
return self._line._clone(self._series.iloc[item])
@property
def _seeded(self):
self._is_seeded = True # call if applied after a seed
return self
def real_multifunc_op(self, *args, **kwargs):
return _MultiFunc_Op(self, *args, **kwargs)
linesops.install_cls(name=name, attr=real_multifunc_op,
propertize=propertize)
class MetaLine(type):
def _line_from_dataframe(cls, self, df, colname):
# it must be dataframe(-like) with dimensions
colnames = [x.lower() for x in df.columns]
try:
idx = colnames.index(colname) # try first by name
except ValueError: # else pre-def index ... or default to 0
idx = config.OHLC_INDICES.get(colname, 0)
# TBD: In this situation the user could be made aware of the invalid
# inputindex (warning and reset to 0 or exception)
if idx >= len(colnames): # sanity check, not beyond possible
idx = 0 # default mapping if sanity check fails
# Finally, assign values
self._minperiod = 1
self._series = df.iloc[:, idx]
def __call__(cls, val=None, name='', index=None, *args, **kwargs):
self = cls.__new__(cls, *args, **kwargs) # create instance
# Process input
if isinstance(val, linesholder.LinesHolder):
val = val.outputs[0] # get 1st line and process
self._minperiod = val._minperiod
self._series = val._series
elif isinstance(val, Lines):
val = val[0] # get 1st line and process
self._minperiod = val._minperiod
self._series = val._series
elif isinstance(val, Line):
self._minperiod = val._minperiod
self._series = val._series
elif isinstance(val, pd.Series):
self._minperiod = 1
self._series = val
elif isinstance(val, pd.DataFrame):
cls._line_from_dataframe(self, val, name)
else:
# Don't know how to convert, store and pray
self._minperiod = 1
if index is None:
self._series = val # 1st column
else:
self._series = pd.Series(val, index=index)
self._name = name # fix the name of the data series
self.__init__(*args, **kwargs) # init instance
return self # return the instance
class Line(metaclass=MetaLine):
_minperiod = 1
_series = None
_name = None
def __hash__(self):
return super().__hash__()
# Install the different proxy operations
for name in linesops._BINOPS:
binary_op(name)
for name, opargs in linesops._REDOPS.items():
reduction_op(name, **opargs)
for name, opargs in linesops._STDOPS.items():
standard_op(name, **opargs)
for name, opargs in linesops._MULTIFUNCOPS.items():
multifunc_op(name, **opargs)
def __call__(self, ago=0, val=np.nan):
if ago:
return self.shift(periods=-ago)
if ago is None:
val = None # called as in (None, ...) ago wasn't meant
if val is None:
val = self._series.copy()
return self._clone(val, index=self._series.index)
def __iter__(self):
return iter(self._series)
def __len__(self):
return len(self._series)
def __getitem__(self, item):
return self._clone(self._series.iloc[item])
def __setitem__(self, item, value):
self._series[item] = value
def _clone(self, series, period=None, index=None):
line = self.__class__(series, index=index)
line._minperiod = period or self._minperiod
return line
@property
def mpseries(self):
return self._series[self._minperiod - 1:]
@property
def series(self):
        self._series.rename(self._name, inplace=True)
        return self._series  # rename(inplace=True) returns None, so return the series itself
@property
def index(self):
return self._series.index
def _period(self, period, rolling=False, val=None):
# return the line with the period increased by period
inc = period - rolling
if not inc:
return self
if val is not None: # set entire changed period to val
idx0 = self._minperiod - 1
idx1 = idx0 + (inc or 1) # maybe no period inc only setval
if idx1 < idx0: # inc is negative ...
idx0, idx1 = idx1, idx0
self._series[idx0:idx1] = val
self._minperiod += inc
return self
def _setval(self, i0=0, i1=0, val=np.nan):
# set a value relative to minperiod as start.
if not i0 and not i1:
            self._series[self._minperiod - 1:] = val  # i1 == 0 here: set through to the end
else:
i0 = self._minperiod - 1 + i0
if i1 >= 0:
                i1 = i0 + (i1 or 1) # i1 rel to i0 or extend i0 by 1 for single value
self._series[i0:i1] = val
return self
def _minperiodize(self, *args, raw=False, **kwargs):
# apply func, adding args and kwargs
minpers = [self._minperiod]
minpers.extend(getattr(x, '_minperiod', 1) for x in args)
minpers.extend(getattr(x, '_minperiod', 1) for x in kwargs.values())
minperiod = max(minpers) # max of any series involved in op
minidx = minperiod - 1 # minperiod is 1-based, easier for location
nargs = []
for x in args:
x = getattr(x, '_series', x)
if isinstance(x, pd.Series):
x = x[minidx:]
if raw:
x = x.to_numpy()
nargs.append(x)
nkwargs = {}
for k, x in kwargs.items():
x = getattr(x, '_series', x)
if isinstance(x, pd.Series):
x = x[minidx:]
if raw:
x = x.to_numpy()
nkwargs[k] = x
return minperiod, minidx, nargs, nkwargs
def _apply(self, func, *args, raw=False, **kwargs):
minperiod, minidx, a, kw = self._minperiodize(*args, raw=raw, **kwargs)
sarray = self._series[minidx:]
if raw:
sarray = sarray.to_numpy(copy=True) # let caller modify the buffer
result = | pd.Series(np.nan, index=self._series.index) | pandas.Series |
from src.evaluation.gnn_evaluation_module import eval_gnn
from src.models.gat_models import MonoGAT#, BiGAT, TriGAT
from src.models.rgcn_models import MonoRGCN, RGCN2
from src.models.appnp_model import MonoAPPNPModel
from src.models.multi_layered_model import MonoModel#, BiModel, TriModel
from torch_geometric.nn import GCNConv, SAGEConv, GATConv, RGCNConv, SGConv, APPNP, ClusterGCNConv
from src.data.data_loader import GraphDataset
import warnings
import pandas as pd
import os
import argparse
import numpy as np
import pickle
import torch
from src.evaluation.network_split import NetworkSplitShchur
from src.data.create_modified_configuration_model import generate_modified_conf_model
from torch_geometric.utils import from_networkx, to_networkx
from community import best_partition
import networkx as nx
def parse_args():
parser = argparse.ArgumentParser(description="Test accuracy for GCN/SAGE/GAT/RGCN/SGC/APPNP")
parser.add_argument('--size',
type=int,
default=96,
                        help='Channel size. Default is 96.')
parser.add_argument('--lr',
type=float,
default=0.01,
help='Learning rate. Default is 0.01.')
parser.add_argument('--wd',
type=float,
default=0.01,
help='Regularization weight. Default is 0.01.')
parser.add_argument('--dropout',
type=float,
default=0.8,
                        help='Dropout probability. Default is 0.8.')
parser.add_argument('--conf',
type=bool,
default=False,
help='Is configuration model evaluation. Default is False.')
parser.add_argument('--shifting',
type=bool,
default=False,
help='Is shifting evaluation. Default is False.')
parser.add_argument('--sbm',
type=bool,
default=False,
help='Is SBM evaluation. Default is False.')
parser.add_argument('--sbm_label',
type=bool,
default=False,
help='Is SBM_label evaluation. Default is False.')
parser.add_argument('--flipped',
type=bool,
default=False,
help='Evaluating with flipped edges? Default is False.')
parser.add_argument('--removed_hubs',
type=bool,
default=False,
help='Evaluating with removed hubs? Default is False.')
parser.add_argument('--added_2hop_edges',
type=bool,
default=False,
help='Evaluating with added 2-hop edges? Default is False.')
parser.add_argument('--label_sbm',
type=bool,
default=False,
help='Evaluating with SBMs created from labels? Default is False.')
parser.add_argument('--heads',
type=int,
default=4,
help='Attention heads. Default is 4.')
parser.add_argument('--attention_dropout',
type=float,
default=0.4,
help='Attention dropout for GAT. Default is 0.4.')
parser.add_argument('--dataset',
default="cora",
help='Dataset name. Default is cora.')
parser.add_argument('--model',
default="gcn",
help='Model name. Default is GCN.')
parser.add_argument('--splits',
type=int,
default=100,
help='Number of random train/validation/test splits. Default is 100.')
parser.add_argument('--runs',
type=int,
default=20,
help='Number of random initializations of the model. Default is 20.')
parser.add_argument('--conf_inits',
type=int,
default=10,
help='Number of configuration model runs. Default is 10.')
parser.add_argument('--sbm_inits',
type=int,
default=10,
help='Number of SBM runs. Default is 10.')
parser.add_argument('--directionality',
default='undirected',
help='Directionality: undirected/directed/reversed. Default is undirected.')
parser.add_argument('--train_examples',
type=int,
default=20,
help='Number of training examples per class. Default is 20.')
parser.add_argument('--val_examples',
type=int,
default=30,
help='Number of validation examples per class. Default is 30.')
args = parser.parse_args()
return args
name2conv = {'gcn': GCNConv, 'sage': SAGEConv, 'gat': GATConv, 'rgcn': RGCNConv, 'rgcn2':RGCN2, 'sgc':SGConv, 'appnp':APPNP, 'cgcn':ClusterGCNConv}
def eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads,attention_dropout,runs,splits,train_examples,val_examples, models=[MonoGAT],isDirected = False):
if isDirected:
models = [MonoGAT]
return eval_gnn(dataset, dataset_name, GATConv, channel_size, dropout, lr, wd, heads=heads, attention_dropout=attention_dropout,
models=models, num_runs=runs, num_splits=splits, test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_gcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoModel], isDirected=False):
if isDirected:
models = [MonoModel]
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_appnp(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoAPPNPModel]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval_archs_rgcn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, runs,splits,train_examples,val_examples, models=[MonoRGCN]):
return eval_gnn(dataset, dataset_name, conv, channel_size, dropout, lr, wd, heads=1,attention_dropout=0.3, # dummy values for heads and attention_dropout
models=models, num_runs=runs, num_splits=splits,test_score=True,
train_examples = train_examples, val_examples = val_examples)
def eval(model, dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, runs, splits, train_examples, val_examples, isDirected):
if model == 'gat':
return eval_archs_gat(dataset, dataset_name, channel_size, dropout, lr, wd, heads, attention_dropout, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
elif model == 'rgcn' or model == 'rgcn2':
return eval_archs_rgcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
elif model == 'appnp':
return eval_archs_appnp(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples)
else:
return eval_archs_gcn(dataset, dataset_name, name2conv[model], channel_size, dropout, lr, wd, splits=splits, runs=runs, train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
def eval_original(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_shuffled_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = dataset.x[torch.randperm(dataset.x.size()[0])]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_random_features(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}', dataset_name,
f'data/graphs/processed/{dataset_name}/{dataset_name}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
dataset.x = torch.randint(0, 2, dataset.x.shape, dtype=torch.float)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_cm_communities(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}-cm_communities-{i}', dataset_name,
f'data/graphs/cm_communities/{dataset_name}/{dataset_name}_cm_communities_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# G = to_networkx(dataset)
# G = nx.DiGraph(G)
# node_communities = best_partition(nx.to_undirected(G))
# nx.set_node_attributes(G,node_communities,'label')
# # print(dataset.edge_index)
# old_edges = dataset.edge_index
# G = generate_modified_conf_model(G)
# # dir_path = f'data/graphs/cm_communities/{dataset_name}'
# # if not os.path.exists(dir_path):
# # os.mkdir(dir_path)
# # nx.write_edgelist(G, f'{dir_path}/{dataset_name}_cm_communities_{i}.cites')
# dataset.edge_index = torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)
# print((torch.tensor(data=np.array(list(G.edges)).T,dtype=torch.long)-old_edges).abs().sum())
# print(dataset.edge_index)
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['graph'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_random(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, random_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(random_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-random{i}', dataset_name,
f'data/graphs/random/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['random_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_erdos(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, erdos_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(erdos_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-erdos{i}', dataset_name,
f'data/graphs/erdos/{dataset_name}/{dataset_name}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['erdos_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
# print(f'data/graphs/injected_edges/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites')
# print(dataset.edge_index.shape)
# print(dataset.edge_index)
# if last_edge is None:
# last_edge = dataset.edge_index
# continue
# print((1-last_edge.eq(last_edge).double()).sum())
# continue
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_degree_cat(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
e = num_edges
hubs_experiment = 'global_edges'
for i in range(inits):
for frm in range(0,100,percentile):
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_degree_cat/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_constant_nodes(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for frm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
to = frm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio}nodes_{i}_{hubs_experiment}_{frm}_to_{to}', dataset_name,
f'data/graphs/injected_edges_constant_nodes/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_{frm}_to_{to}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['from'] = frm
df_cur['to'] = to
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_attack_target(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, control_ratio, edges_per_node, percentile):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
hubs_experiment = 'global_edges'
for atkfrm in range(0,100,percentile):
for tgtfrm in range(0,100,percentile):
for i in range(inits):
for e in edges_per_node:
atkto = atkfrm + percentile
tgtto = tgtfrm + percentile
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_{e}edges_{control_ratio:.3f}nodes_{i}_{hubs_experiment}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}', dataset_name,
f'data/graphs/injected_edges_attack_target/{dataset_name}/{dataset_name}_global_edges{e}_nodes{control_ratio:.3f}_{i}_atk{atkfrm}_{atkto}_tgt{tgtfrm}_{tgtto}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['edges_per_node'] = e
df_cur['control_ratio'] = control_ratio
df_cur['atkfrm'] = atkfrm
df_cur['atkto'] = atkto
df_cur['tgtfrm'] = tgtfrm
df_cur['tgtto'] = tgtto
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_injected_edges_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, inits, num_edges, hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
last_edge = None
for e in num_edges:
for i in range(inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-injected_sbm_{e}_{i}_{hubs_experiment}', dataset_name,
f'data/graphs/injected_edges_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}_{e}_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['init_num'] = i
df_cur['injected_edges'] = e
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_label_sbm(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples,hubs_experiment):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-label_sbm_{hubs_experiment}', dataset_name,
f'data/graphs/label_sbm/{dataset_name}/{dataset_name}_{hubs_experiment}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads, attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
return df_cur
def eval_conf(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, conf_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
df_val = pd.DataFrame()
for i in range(conf_inits):
dataset = GraphDataset(f'data/tmp/{dataset_name}{("_" + directionality) if isDirected else ""}-confmodel{i}', dataset_name,
f'data/graphs/confmodel/{dataset_name}/{dataset_name}_confmodel_{i}.cites',
f'data/graphs/processed/{dataset_name}/{dataset_name}.content',
directed=isDirected, reverse=isReversed)[0]
df_cur = eval(model=model, dataset=dataset, dataset_name=dataset_name, channel_size=size, lr=lr, splits=splits, runs=runs,
dropout=dropout, wd=wd, heads=heads,attention_dropout=attention_dropout,
train_examples = train_examples, val_examples = val_examples,isDirected=isDirected)
df_cur['confmodel_num'] = i
df_val = pd.concat([df_val, df_cur])
return df_val
def eval_shifting(model, dataset_name, directionality, size, dropout, lr, wd, heads,attention_dropout,
splits, runs, train_examples, val_examples, shifting_inits):
isDirected = (directionality != 'undirected')
isReversed = (directionality == 'reversed')
    df_val = pd.DataFrame()
import functools
import numpy as np
import scipy
import scipy.linalg
import scipy
import scipy.sparse as sps
import scipy.sparse.linalg as spsl
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import logging
import tables as tb
import os
import sandy
import pytest
pd.options.display.float_format = '{:.5e}'.format
__author__ = "<NAME>"
__all__ = [
"CategoryCov",
"EnergyCov",
"triu_matrix",
"corr2cov",
"random_corr",
"random_cov",
"sample_distribution",
]
S = np.array([[1, 1, 1],
[1, 2, 1],
[1, 3, 1]])
var = np.array([[0, 0, 0],
[0, 2, 0],
[0, 0, 3]])
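# Minimal stacked covariance test case: each row holds one matrix entry, keyed
# by (MAT, MT, E) for the row and (MAT1, MT1, E1) for the column, with the
# covariance value in VAL.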
minimal_covtest = pd.DataFrame(
[[9437, 2, 1e-2, 9437, 2, 1e-2, 0.02],
[9437, 2, 2e5, 9437, 2, 2e5, 0.09],
[9437, 2, 1e-2, 9437, 102, 1e-2, 0.04],
[9437, 2, 2e5, 9437, 102, 2e5, 0.05],
[9437, 102, 1e-2, 9437, 102, 1e-2, 0.01],
[9437, 102, 2e5, 9437, 102, 2e5, 0.01]],
columns=["MAT", "MT", "E", "MAT1", "MT1", 'E1', "VAL"]
)
def cov33csv(func):
def inner(*args, **kwargs):
key = "<KEY>"
kw = kwargs.copy()
if key in kw:
if kw[key]:
print(f"found argument '{key}', ignore oher arguments")
out = func(
*args,
index_col=[0, 1, 2],
header=[0, 1, 2],
)
out.index.names = ["MAT", "MT", "E"]
out.columns.names = ["MAT", "MT", "E"]
return out
else:
del kw[key]
out = func(*args, **kw)
return out
return inner
class _Cov(np.ndarray):
"""Covariance matrix treated as a `numpy.ndarray`.
Methods
-------
corr
extract correlation matrix
corr2cov
produce covariance matrix given correlation matrix and standard
deviation array
eig
get covariance matrix eigenvalues and eigenvectors
get_L
decompose and extract lower triangular matrix
sampling
draw random samples
"""
def __new__(cls, arr):
obj = np.ndarray.__new__(cls, arr.shape, float)
obj[:] = arr[:]
if not obj.ndim == 2:
raise sandy.Error("covariance matrix must have two dimensions")
if not np.allclose(obj, obj.T):
raise sandy.Error("covariance matrix must be symmetric")
if (np.diag(arr) < 0).any():
raise sandy.Error("covariance matrix must have positive variances")
return obj
@staticmethod
def _up2down(self):
U = np.triu(self)
L = np.triu(self, 1).T
C = U + L
return C
def eig(self):
"""
Extract eigenvalues and eigenvectors.
Returns
-------
`Pandas.Series`
real part of eigenvalues sorted in descending order
`np.array`
matrix of eigenvectors
"""
E, V = scipy.linalg.eig(self)
E, V = E.real, V.real
return E, V
def corr(self):
"""Extract correlation matrix.
.. note:: zeros on the covariance matrix diagonal are translated
                  into zeros also on the correlation matrix diagonal.
Returns
-------
`sandy.formats.utils.Cov`
correlation matrix
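        Examples
        --------
        Hypothetical example, not part of the original docstring: a rank-1
        covariance matrix has a correlation matrix of all ones.
        >>> c = _Cov(np.array([[4., 2.], [2., 1.]]))
        >>> np.allclose(c.corr(), 1.)
        True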
"""
std = np.sqrt(np.diag(self))
with np.errstate(divide='ignore', invalid='ignore'):
coeff = np.true_divide(1, std)
coeff[~ np.isfinite(coeff)] = 0 # -inf inf NaN
corr = np.multiply(np.multiply(self.T, coeff).T, coeff)
return self.__class__(corr)
def _reduce_size(self):
"""
Reduces the size of the matrix, erasing the null values.
Returns
-------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
"""
nonzero_idxs = np.flatnonzero(np.diag(self))
cov_reduced = self[nonzero_idxs][:, nonzero_idxs]
return nonzero_idxs, cov_reduced
@classmethod
def _restore_size(cls, nonzero_idxs, cov_reduced, dim):
"""
Restore the size of the matrix
Parameters
----------
nonzero_idxs : numpy.ndarray
The indices of the diagonal that are not null.
cov_reduced : sandy.core.cov._Cov
The reduced matrix.
dim : int
Dimension of the original matrix.
Returns
-------
cov : sandy.core.cov._Cov
Matrix of specified dimensions.
"""
cov = _Cov(np.zeros((dim, dim)))
for i, ni in enumerate(nonzero_idxs):
cov[ni, nonzero_idxs] = cov_reduced[i]
return cov
def sampling(self, nsmp, seed=None):
"""
Extract random samples from the covariance matrix, either using
the cholesky or the eigenvalue decomposition.
Parameters
----------
nsmp : `int`
number of samples
seed : `int`
seed for the random number generator (default is `None`)
Returns
-------
`np.array`
2D array of random samples with dimension `(self.shape[0], nsmp)`
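        Examples
        --------
        Hypothetical example, not part of the original docstring: the output
        has one row per matrix dimension and one column per sample.
        >>> _Cov(np.array([[1., .5], [.5, 2.]])).sampling(3, seed=11).shape
        (2, 3)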
"""
dim = self.shape[0]
np.random.seed(seed=seed)
y = np.random.randn(dim, nsmp)
nonzero_idxs, cov_reduced = self._reduce_size()
L_reduced = cov_reduced.get_L()
L = self.__class__._restore_size(nonzero_idxs, L_reduced, dim)
samples = np.array(L.dot(y))
return samples
def get_L(self):
"""
Extract lower triangular matrix `L` for which `L*L^T == self`.
Returns
-------
`np.array`
lower triangular matrix
"""
try:
L = scipy.linalg.cholesky(
self,
lower=True,
overwrite_a=False,
check_finite=False
)
except np.linalg.linalg.LinAlgError:
E, V = self.eig()
E[E <= 0] = 0
Esqrt = np.diag(np.sqrt(E))
M = V.dot(Esqrt)
Q, R = scipy.linalg.qr(M.T)
L = R.T
return L
class CategoryCov():
"""
Properties
----------
data
covariance matrix as a dataframe
size
first dimension of the covariance matrix
Methods
-------
corr2cov
create a covariance matrix given a correlation matrix and a standard
deviation vector
from_stack
create a covariance matrix from a stacked `pd.DataFrame`
from_stdev
construct a covariance matrix from a stdev vector
from_var
construct a covariance matrix from a variance vector
get_corr
extract correlation matrix from covariance matrix
get_eig
extract eigenvalues and eigenvectors from covariance matrix
get_L
extract lower triangular matrix such that $C=L L^T$
get_std
extract standard deviations from covariance matrix
invert
calculate the inverse of the matrix
sampling
extract perturbation coefficients according to chosen distribution
and covariance matrix
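    Examples
    --------
    Hypothetical construction, not part of the original docstring; positional
    arguments are forwarded to `pandas.DataFrame`:
    >>> CategoryCov([[1, 0], [0, 4]]).data.shape
    (2, 2)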
"""
def __repr__(self):
return self.data.__repr__()
def __init__(self, *args, **kwargs):
        self.data = pd.DataFrame(*args, **kwargs)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Th Jun 6 11:13:11 2019
@author: inesverissimo
Do SOMA contrasts and save outputs
"""
import os, json
import sys, glob
import re
import numpy as np
import pandas as pd
import nibabel as nb
from nilearn import surface
from nistats.design_matrix import make_first_level_design_matrix
from nistats.first_level_model import run_glm
from nistats.contrasts import compute_contrast
from utils import * # import script to use relevant functions
# define participant number and open json parameter file
if len(sys.argv)<2:
raise NameError('Please add subject number (ex:01) '
'as 1st argument in the command line!')
else:
sj = str(sys.argv[1]).zfill(2) #fill subject number with 0 in case user forgets
with open('analysis_params.json','r') as json_file:
analysis_params = json.load(json_file)
# use smoothed data?
with_smooth = analysis_params['with_smooth']
if sj == 'median':
allsubdir = glob.glob(os.path.join(analysis_params['post_fmriprep_outdir'],'soma','sub-*/'))
alleventdir = glob.glob(os.path.join(analysis_params['sourcedata_dir'],'sub-*/'))
else: # if individual subject
allsubdir = glob.glob(os.path.join(analysis_params['post_fmriprep_outdir'],'soma','sub-{sj}'.format(sj=sj)))
alleventdir = glob.glob(os.path.join(analysis_params['sourcedata_dir'],'sub-{sj}'.format(sj=sj)))
allsubdir.sort()
alleventdir.sort()
onsets_allsubs = []
durations_allsubs = []
for idx,subdir in enumerate(allsubdir): #loop over all subjects in defined list
print('functional files from %s'%allsubdir[idx])
print('event files from %s'%alleventdir[idx])
# define paths and list of files
filepath = glob.glob(os.path.join(subdir,'*'))
eventpath = glob.glob(os.path.join(alleventdir[idx],'*','func/*'))
# changes depending on data used
if with_smooth=='True':
# soma out path
soma_out = os.path.join(analysis_params['soma_outdir'],'sub-{sj}'.format(sj=sj),'run-median','smooth%d'%analysis_params['smooth_fwhm'])
# last part of filename to use
file_extension = 'sg_psc_smooth%d.func.gii'%analysis_params['smooth_fwhm']
else:
# soma out path
soma_out = os.path.join(analysis_params['soma_outdir'],'sub-{sj}'.format(sj=sj),'run-median')
# last part of filename to use
file_extension = 'sg_psc.func.gii'
# list of functional files
filename = [run for run in filepath if 'soma' in run and 'fsaverage' in run and run.endswith(file_extension)]
filename.sort()
if not os.path.exists(soma_out): # check if path to save median run exist
os.makedirs(soma_out)
# list of stimulus onsets
events = [run for run in eventpath if 'soma' in run and run.endswith('events.tsv')]
events.sort()
TR = analysis_params["TR"]
# load and stack median run for subject
data_both=[]
for hemi_label in ['hemi-L','hemi-R']:
filestring = os.path.join(subdir,'{sj}_ses-*_task-soma_run-median_space-fsaverage_{hemi}_{ext}'.format(sj=os.path.split(os.path.split(subdir)[0])[1],
hemi=hemi_label,
ext=file_extension))
absfile = glob.glob(filestring) #absolute filename for median run
if not absfile: #if list is empty (no median run)
print('%s doesn\'t exist' %(filestring))
# list with absolute files to make median over
run_files = [os.path.join(subdir,file) for _,file in enumerate(os.listdir(subdir))
if 'sub-{sj}'.format(sj=str(sj).zfill(2)) in file and
'_{hemi}'.format(hemi=hemi_label) in file and
'_{ext}'.format(ext=file_extension) in file]
run_files.sort()
#compute and save median run
file_hemi = median_gii(run_files,subdir)
print('averaged %d runs, computed %s' %(len(run_files),file_hemi))
# load surface data from path and append both hemi in array
data_both.append(surface.load_surf_data(file_hemi).T)
print('loading %s' %file_hemi)
else:
# load surface data from path and append both hemi in array
data_both.append(surface.load_surf_data(absfile[0]).T)
print('loading %s' %absfile[0])
# stack them to get 2D array
median_data = np.hstack(data_both)
if idx == 0:
median_sub = median_data[np.newaxis,:,:]
else:
median_sub = np.vstack((median_sub,median_data[np.newaxis,:,:]))
# Append all events in same dataframe
print('Loading events')
all_events = []
for _,val in enumerate(events):
events_pd = pd.read_csv(val,sep = '\t')
new_events = []
for ev in events_pd.iterrows():
row = ev[1]
if row['trial_type'][0] == 'b': # if both hand/leg then add right and left events with same timings
new_events.append([row['onset'],row['duration'],'l'+row['trial_type'][1:]])
new_events.append([row['onset'],row['duration'],'r'+row['trial_type'][1:]])
else:
new_events.append([row['onset'],row['duration'],row['trial_type']])
        df = pd.DataFrame(new_events, columns=['onset','duration','trial_type'])
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
# load the data sets
def read_data(base_info_path,
annual_report_info_path,
tax_info_path,
change_info_path,
news_info_path,
other_info_path,
entprise_info_path,
):
    base_info = pd.read_csv(base_info_path)  # basic enterprise information
annual_report_info = pd.read_csv(annual_report_info_path)
    tax_info = pd.read_csv(tax_info_path)
change_info = pd.read_csv(change_info_path)
    news_info = pd.read_csv(news_info_path)
import pandas as pd
from tqdm import tqdm
import os
genidlist=[]
LabGenID=[]
Labeldf = pd.read_csv('AMR_LAbel_EColi.csv', sep=",", dtype=str, low_memory=True)
selectedf = Labeldf[['genome_id']]
Antiboticslist=Labeldf.columns.values.tolist()
Antiboticslist.remove('Unnamed: 0')
Antiboticslist.remove( 'genome_id')
Antiboticslist.remove('genome_name')
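# Encode the phenotype labels numerically: Susceptible -> 0, Intermediate -> 0.5, Resistant -> 1.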
Labeldf = Labeldf.replace(to_replace=['Susceptible', 'Intermediate', 'Resistant','susceptible', 'intermediate', 'resistant'], value=[0, 0.5, 1,0, 0.5, 1])
for genid in selectedf.values:
LabGenID.append(genid)
def PopulateAMRProteinMatrix(Dir,Filename,matrixdf):
pd.options.display.float_format = '{:,.5f}'.format
df = pd.read_csv(Dir + Filename, sep="\t", index_col=5, dtype=str, low_memory=True)
genid=str((df['genome_id'].iloc[0]))
#print (genid)
#print(genidlist)
if(genid in LabGenID) and (genid not in genidlist):
matrixdf = matrixdf.append({'genome_id': genid, 'genome_name':(Labeldf[Labeldf['genome_id'] == genid])['genome_name'].values, 'taxon_id':(Labeldf[Labeldf['genome_id'] == genid])['taxon_id'].values},
ignore_index=True)
for Antiboitics in Antiboticslist:
matrixdf.loc[matrixdf['genome_id'] == genid, Antiboitics] = (Labeldf[Labeldf['genome_id'] == genid])[Antiboitics].values
selectedf = df[['genome_id', 'genome_name', 'plfam_id']]
selectedf = selectedf.dropna(subset=['plfam_id'])
genidlist.append(genid)
for genid, gennam, plfam_id in selectedf.values:
if plfam_id not in matrixdf.columns:
finaldf2 = pd.DataFrame({plfam_id: [0]})
matrixdf = matrixdf.join(finaldf2)
matrixdf.loc[matrixdf['genome_id'] == genid, plfam_id] = 1
elif (genid in LabGenID):
print ('Duplicate')
else:
print ("Not in Lab based")
return matrixdf
def ReadFromFeaturesFolders(path, Matdf: object):
for filename in tqdm(os.listdir(path)):
Matdf = PopulateAMRProteinMatrix(path, filename, Matdf)
#Matdf.to_csv('Test_Multilabel_final.csv', sep=',')
return Matdf
finaldf = pd.DataFrame({'genome_id':[''],'genome_name':[''],'taxon_id':[''],'ampicillin':[0], 'amoxicillin/clavulanic acid':[0], 'aztreonam':[0], 'cefepime':[0], 'cefotaxime':[0], 'cefoxitin':[0], 'ceftazidime':[0], 'ciprofloxacin':[0], 'gentamicin':[0], 'piperacillin/tazobactam':[0], 'sulfamethoxazole/trimethoprim':[0], 'tobramycin':[0], 'trimethoprim':[0]
                        })
from collections import defaultdict
from typing import DefaultDict
import pandas as pd
import pickle
from pathlib import Path
import numpy as np
import argparse
def process_dataframe(list_of_results):
results = pd.DataFrame(list_of_results,
columns=['user', 'model', 'image_type',
'attack_type', 'image_idx',
'attack_img', 'rmse', 'score',
'success'])
results['success'].fillna(0, inplace=True)
results['success'] = results['success'].astype(bool)
results['model'] = results['model'].str.replace('model\_cnn\_linear',
'SigNet & Linear')
results['model'] = results['model'].str.replace('model\_cnn\_rbf',
'SigNet & RBF')
results['model'] = results['model'].str.replace('model\_lbp\_linear',
'CLBP & Linear')
results['model'] = results['model'].str.replace('model\_lbp\_rbf',
'CLBP & RBF')
return results
def load_results(fname):
with open(fname, 'rb') as f:
gen, forg = pickle.load(f)
results_genuine = process_dataframe(gen)
results_forgery = process_dataframe(forg)
return results_genuine, results_forgery
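# Hypothetical usage of the helpers above (not from the original source): load
# one results pickle and inspect the per-model success rate on genuine images.
#
#     gen, forg = load_results('results/mcyt_cnn_half_pk.pickle')
#     print(gen.groupby('model')['success'].mean())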
def format_pct(x):
if np.isnan(x):
return '-'
return '%.2f' % (x * 100)
def format_normal(x):
if np.isnan(x):
return '-'
else:
return '%.2f' % x
parser = argparse.ArgumentParser()
parser.add_argument('results_folder')
args = parser.parse_args()
knowledge_scenarios = ['pk', 'lk1', 'lk2']
defense_models = ['', 'ensadv', 'madry']
datasets = ['mcyt', 'cedar', 'brazilian', 'gpds']
base_path = Path(args.results_folder)
# base_path = Path('~/runs/adv/').expanduser()
all_results_genuine = []
all_results_forgery = []
all_results_genuine_bydataset = defaultdict(list)
all_results_forgery_bydataset = defaultdict(list)
for k in knowledge_scenarios:
for model in defense_models:
for d in datasets:
if model == '':
model_ = model
modelname = 'baseline'
else:
model_ = '{}_'.format(model)
modelname = model
filename = base_path / '{}_cnn_half_{}{}.pickle'.format(d, model_, k)
results_genuine, results_forgery = load_results(filename)
results_genuine['knowledge'] = k
results_genuine['defense'] = modelname
results_genuine['dataset'] = d
results_forgery['knowledge'] = k
results_forgery['defense'] = modelname
results_forgery['dataset'] = d
all_results_genuine.append(results_genuine)
all_results_forgery.append(results_forgery)
all_results_genuine_bydataset[d].append(results_genuine)
all_results_forgery_bydataset[d].append(results_forgery)
def print_results(results_genuine, results_forgery):
df = results_genuine[(results_genuine['attack_type'] == 'fgm') |
(results_genuine['attack_type'] == 'carlini')].drop(columns=['attack_img'])
# Fixing the order:
df.loc[df['knowledge'] == 'pk', 'knowledge'] = '_pk'
df.loc[df['attack_type'] == 'fgm', 'attack_type'] = '_fgm'
pd.set_option('display.float_format', format_pct)
g = df.groupby(['defense', 'model', 'knowledge', 'attack_type'])
subset = g[['success']].mean()
p = subset.reset_index().pivot_table(index=['defense', 'model'],
values='success',
columns=['attack_type', 'knowledge'])
print('Genuine, success')
print(p.to_latex())
pd.set_option('display.float_format', format_normal)
only_success = df[df['success'] == True]
g = only_success.groupby(['defense', 'model', 'knowledge', 'attack_type'])
subset = g[['rmse']].mean()
p = subset.reset_index().pivot_table(index=['defense', 'model'],
values='rmse',
columns=['attack_type', 'knowledge'])
pd.set_option('display.float_format', format_normal)
print('Genuine, RMSE')
print(p.to_latex())
df = results_forgery[(results_forgery['attack_type'] == 'fgm') |
(results_forgery['attack_type'] == 'carlini')].drop(columns=['attack_img'])
# Fixing the order:
df.loc[df['knowledge'] == 'pk', 'knowledge'] = '_pk'
df.loc[df['attack_type'] == 'fgm', 'attack_type'] = '_fgm'
pd.set_option('display.float_format', format_pct)
g = df.groupby(['defense', 'model', 'knowledge', 'attack_type', 'image_type'])
subset = g[['success']].mean()
p = subset.reset_index().pivot_table(index=['defense', 'model', 'image_type'],
values='success',
columns=['attack_type', 'knowledge'])
print('Forgery, success')
print(p.to_latex())
only_success = df[df['success'] == True]
g = only_success.groupby(['defense', 'model', 'knowledge', 'attack_type', 'image_type'])
subset = g[['rmse']].mean()
p = subset.reset_index().pivot_table(index=['defense', 'model', 'image_type'],
values='rmse',
columns=['attack_type', 'knowledge'])
pd.set_option('display.float_format', format_normal)
print('Forgery, RMSE')
print(p.to_latex())
results_genuine = pd.concat(all_results_genuine)
import bs4
import requests
import lxml
import pandas as pd
import re
import os
total_page = 5
data_df = pd.DataFrame(columns=["Reviews","Date","Rating"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 13:55:53 2021
@author: Clement
"""
import pandas
import geopandas as gpd
import numpy
import os
import sys
import datetime
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from gen_fct import file_fct
from gen_fct import df_fct
def last_update_db (dir_name, db_list):
list_dir, list_files = file_fct.list_dir_files(f'{dir_name}')
db_daily = db_list[db_list.loc[:,'update']==True]
if 'last_update.json' in list_files:
last_update = pandas.read_json(f'{dir_name}/last_update.json', orient = "table")
last_update['delta_day'] = last_update.apply(lambda x: (pandas.to_datetime('today')-x["date"]).days,axis=1)
print(last_update)
print('\n')
else:
last_update = pandas.DataFrame(index=db_daily.index, columns=['date', 'delta_day'])
last_update.loc[:,'delta_day'] = 100 #Arbitrary value
return last_update
def import_and_save(df_name, root, source_df):
save_path = os.path.normcase(f'{root}{source_df.loc[df_name, "sub_dir"]}/{source_df.loc[df_name, "file_name"]}')
file_fct.creation_folder(root,[source_df.loc[df_name, "sub_dir"]])
if source_df.loc[df_name, 'type'] == 'Pandas':
importing_df = pandas.read_csv(source_df.loc[df_name, 'link'],
sep=source_df.loc[df_name, 'sep'],
encoding=source_df.loc[df_name, 'encoding'])
importing_df.to_csv(save_path, index=False, sep=source_df.loc[df_name, 'sep'])
elif source_df.loc[df_name, 'type'] == 'GeoPandas':
importing_df = gpd.read_file(source_df.loc[df_name, 'link'])
importing_df.to_file(save_path, index=False)
return importing_df
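# Hypothetical call (not from the original source): 'cases_fr' stands for a row
# label of the sources table whose 'link', 'sub_dir', 'file_name' and 'type'
# columns tell the function what to download and where to store it under `root`.
#
#     df = import_and_save('cases_fr', 'data/raw', db_list)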
def import_static (data_dir, db_list):
raw_data_dir = os.path.normcase(f'{data_dir}/raw')
list_dir, list_files = file_fct.list_dir_files(raw_data_dir)
df_static = db_list[db_list.loc[:,'update']==False]
for a_df_name in df_static.index:
if df_static.loc[a_df_name, 'file_name'] not in list_files:
print(f"Downloading {df_static.loc[a_df_name, 'file_name']}...", end='\x1b[1K\r')
import_and_save(a_df_name, raw_data_dir, df_static)
print(f"{df_static.loc[a_df_name, 'file_name']} downloaded")
print('\n\n')
def import_daily (data_dir, db_list, last_update_db, limit):
raw_data_dir = os.path.normcase(f'{data_dir}/raw')
df_daily = db_list[db_list.loc[:,'update']==True]
for a_df_name in df_daily.index:
if a_df_name not in last_update_db.index:
print(f"Creating and downloading {df_daily.loc[a_df_name, 'file_name']}...", end='')
df = import_and_save(a_df_name, raw_data_dir, df_daily)
delta_spaces = " "*(len(f"Creating and downloading {df_daily.loc[a_df_name, 'file_name']}...")-len(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded"))
print(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded {delta_spaces}")
last_update = get_dates (df, a_df_name, db_list)
last_update_db.loc[a_df_name, 'date'] = last_update
elif last_update_db.loc[a_df_name, 'delta_day'] > limit:
print(f"Downloading {df_daily.loc[a_df_name, 'file_name']}...", end='')
df = import_and_save(a_df_name, raw_data_dir, df_daily)
delta_spaces = " "*(len(f"Downloading {df_daily.loc[a_df_name, 'file_name']}...")-len(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded"))
print(f"\r{df_daily.loc[a_df_name, 'file_name']} was downloaded {delta_spaces}")
last_update = get_dates (df, a_df_name, db_list)
last_update_db.loc[a_df_name, 'date'] = last_update
data_dir = file_fct.get_parent_dir(2, 'data')
last_update_db['delta_day'] = last_update_db.apply(lambda x: (pandas.to_datetime('today')-x["date"]).days,axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 11 23:13:13 2019
@author: gaurav
"""
import joblib  # sklearn.externals.joblib is deprecated; import joblib directly
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
df = pd.read_csv('heart.csv')
#
# Authors: Security Intelligence Team within the Security Coordination Center
#
# Copyright (c) 2018 Adobe Systems Incorporated. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from typing import Union
import pandas as pd
import numbers
sys.path.append('')
from osas.core.interfaces import Datasource, DataColumn
class CSVDataColumn(DataColumn):
def __init__(self, data: pd.DataFrame):
super(CSVDataColumn, self).__init__()
self._data = data
def mean(self) -> float:
return self._data.mean()
def std(self) -> float:
return self._data.std()
def min(self) -> any:
return self._data.min()
def max(self) -> any:
return self._data.max()
def unique(self) -> list:
return pd.unique(self._data)
def value_counts(self) -> dict:
return self._data.value_counts()
def tolist(self) -> list:
return list(self._data)
def apply(self, func) -> int:
self._data.apply(func)
def __len__(self) -> int:
return len(self._data)
def __getitem__(self, index: int) -> dict:
return self._data[index]
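# Hypothetical usage (not from the original source): wrap one column of a
# loaded CSV and query simple statistics through the DataColumn interface.
#
#     col = CSVDataColumn(pd.read_csv('events.csv')['duration'])
#     print(col.mean(), col.min(), col.max())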
class CSVDataSource(Datasource):
def __init__(self, filename: str):
super().__init__()
        self._data = pd.read_csv(filename)
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import OrderedDict
import numpy as np
import pandas as pd
import pytest
try:
import pyarrow as pa
except ImportError: # pragma: no cover
pa = None
from ....config import options, option_context
from ....dataframe import DataFrame
from ....tensor import arange, tensor
from ....tensor.random import rand
from ....tests.core import require_cudf
from ....utils import lazy_import
from ... import eval as mars_eval, cut, qcut
from ...datasource.dataframe import from_pandas as from_pandas_df
from ...datasource.series import from_pandas as from_pandas_series
from ...datasource.index import from_pandas as from_pandas_index
from .. import to_gpu, to_cpu
from ..to_numeric import to_numeric
from ..rebalance import DataFrameRebalance
cudf = lazy_import('cudf', globals=globals())
@require_cudf
def test_to_gpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
res = cdf.execute().fetch()
assert isinstance(res, cudf.DataFrame)
pd.testing.assert_frame_equal(res.to_pandas(), pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries)
cseries = series.to_gpu()
res = cseries.execute().fetch()
assert isinstance(res, cudf.Series)
pd.testing.assert_series_equal(res.to_pandas(), pseries)
@require_cudf
def test_to_cpu_execution(setup_gpu):
pdf = pd.DataFrame(np.random.rand(20, 30), index=np.arange(20, 0, -1))
df = from_pandas_df(pdf, chunk_size=(13, 21))
cdf = to_gpu(df)
df2 = to_cpu(cdf)
res = df2.execute().fetch()
assert isinstance(res, pd.DataFrame)
pd.testing.assert_frame_equal(res, pdf)
pseries = pdf.iloc[:, 0]
series = from_pandas_series(pseries, chunk_size=(13, 21))
cseries = to_gpu(series)
series2 = to_cpu(cseries)
res = series2.execute().fetch()
assert isinstance(res, pd.Series)
pd.testing.assert_series_equal(res, pseries)
def test_rechunk_execution(setup):
data = pd.DataFrame(np.random.rand(8, 10))
df = from_pandas_df(pd.DataFrame(data), chunk_size=3)
df2 = df.rechunk((3, 4))
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
data = pd.DataFrame(np.random.rand(10, 10), index=np.random.randint(-100, 100, size=(10,)),
columns=[np.random.bytes(10) for _ in range(10)])
df = from_pandas_df(data)
df2 = df.rechunk(5)
res = df2.execute().fetch()
pd.testing.assert_frame_equal(data, res)
# test Series rechunk execution.
data = pd.Series(np.random.rand(10,))
series = from_pandas_series(data)
series2 = series.rechunk(3)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
series2 = series.rechunk(1)
res = series2.execute().fetch()
pd.testing.assert_series_equal(data, res)
# test index rechunk execution
data = pd.Index(np.random.rand(10,))
index = from_pandas_index(data)
index2 = index.rechunk(3)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
index2 = index.rechunk(1)
res = index2.execute().fetch()
pd.testing.assert_index_equal(data, res)
# test rechunk on mixed typed columns
data = pd.DataFrame({0: [1, 2], 1: [3, 4], 'a': [5, 6]})
df = from_pandas_df(data)
df = df.rechunk((2, 2)).rechunk({1: 3})
res = df.execute().fetch()
pd.testing.assert_frame_equal(data, res)
def test_series_map_execution(setup):
raw = pd.Series(np.arange(10))
s = from_pandas_series(raw, chunk_size=7)
with pytest.raises(ValueError):
# cannot infer dtype, the inferred is int,
# but actually it is float
# just due to nan
s.map({5: 10})
r = s.map({5: 10}, dtype=float)
result = r.execute().fetch()
expected = raw.map({5: 10})
pd.testing.assert_series_equal(result, expected)
r = s.map({i: 10 + i for i in range(7)}, dtype=float)
result = r.execute().fetch()
expected = raw.map({i: 10 + i for i in range(7)})
pd.testing.assert_series_equal(result, expected)
r = s.map({5: 10}, dtype=float, na_action='ignore')
result = r.execute().fetch()
expected = raw.map({5: 10}, na_action='ignore')
pd.testing.assert_series_equal(result, expected)
# dtype can be inferred
r = s.map({5: 10.})
result = r.execute().fetch()
expected = raw.map({5: 10.})
pd.testing.assert_series_equal(result, expected)
r = s.map(lambda x: x + 1, dtype=int)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
def f(x: int) -> float:
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
def f(x: int):
return x + 1.
# dtype can be inferred for function
r = s.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series
raw2 = pd.Series([10], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2, dtype=float)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test arg is a md.Series, and dtype can be inferred
raw2 = pd.Series([10.], index=[5])
s2 = from_pandas_series(raw2)
r = s.map(s2)
result = r.execute().fetch()
expected = raw.map(raw2)
pd.testing.assert_series_equal(result, expected)
# test str
raw = pd.Series(['a', 'b', 'c', 'd'])
s = from_pandas_series(raw, chunk_size=2)
r = s.map({'c': 'e'})
result = r.execute().fetch()
expected = raw.map({'c': 'e'})
pd.testing.assert_series_equal(result, expected)
# test map index
raw = pd.Index(np.random.rand(7))
idx = from_pandas_index(pd.Index(raw), chunk_size=2)
r = idx.map(f)
result = r.execute().fetch()
expected = raw.map(lambda x: x + 1.)
pd.testing.assert_index_equal(result, expected)
def test_describe_execution(setup):
s_raw = pd.Series(np.random.rand(10))
# test one chunk
series = from_pandas_series(s_raw, chunk_size=10)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
# test multi chunks
series = from_pandas_series(s_raw, chunk_size=3)
r = series.describe()
result = r.execute().fetch()
expected = s_raw.describe()
pd.testing.assert_series_equal(result, expected)
r = series.describe(percentiles=[])
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[])
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(5)
df_raw = pd.DataFrame(rs.rand(10, 4), columns=list('abcd'))
df_raw['e'] = rs.randint(100, size=10)
# test one chunk
df = from_pandas_df(df_raw, chunk_size=10)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = series.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = s_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_series_equal(result, expected)
# test multi chunks
df = from_pandas_df(df_raw, chunk_size=3)
r = df.describe()
result = r.execute().fetch()
expected = df_raw.describe()
pd.testing.assert_frame_equal(result, expected)
r = df.describe(percentiles=[], include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
pd.testing.assert_frame_equal(result, expected)
# test skip percentiles
r = df.describe(percentiles=False, include=np.float64)
result = r.execute().fetch()
expected = df_raw.describe(percentiles=[], include=np.float64)
expected.drop(['50%'], axis=0, inplace=True)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
df.describe(percentiles=[1.1])
with pytest.raises(ValueError):
# duplicated values
df.describe(percentiles=[0.3, 0.5, 0.3])
# test input dataframe which has unknown shape
df = from_pandas_df(df_raw, chunk_size=3)
df2 = df[df['a'] < 0.5]
r = df2.describe()
result = r.execute().fetch()
expected = df_raw[df_raw['a'] < 0.5].describe()
pd.testing.assert_frame_equal(result, expected)
def test_data_frame_apply_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
df = from_pandas_df(df_raw, chunk_size=5)
r = df.apply('ffill')
result = r.execute().fetch()
expected = df_raw.apply('ffill')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(['sum', 'max'])
result = r.execute().fetch()
expected = df_raw.apply(['sum', 'max'])
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sqrt)
result = r.execute().fetch()
expected = df_raw.apply(np.sqrt)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2]))
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2]))
pd.testing.assert_frame_equal(result, expected)
r = df.apply(np.sum, axis='index')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='index')
pd.testing.assert_series_equal(result, expected)
r = df.apply(np.sum, axis='columns')
result = r.execute().fetch()
expected = df_raw.apply(np.sum, axis='columns')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1)
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
result = r.execute().fetch()
expected = df_raw.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: [1, 2], axis=1, result_type='expand')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: [1, 2], axis=1, result_type='expand')
pd.testing.assert_frame_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='reduce')
pd.testing.assert_series_equal(result, expected)
r = df.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
result = r.execute().fetch()
expected = df_raw.apply(lambda x: list(range(10)), axis=1, result_type='broadcast')
pd.testing.assert_frame_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
def test_series_apply_execute(setup):
idxes = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idxes)
series = from_pandas_series(s_raw, chunk_size=5)
r = series.apply('add', args=(1,))
result = r.execute().fetch()
expected = s_raw.apply('add', args=(1,))
pd.testing.assert_series_equal(result, expected)
r = series.apply(['sum', 'max'])
result = r.execute().fetch()
expected = s_raw.apply(['sum', 'max'])
pd.testing.assert_series_equal(result, expected)
r = series.apply(np.sqrt)
result = r.execute().fetch()
expected = s_raw.apply(np.sqrt)
pd.testing.assert_series_equal(result, expected)
r = series.apply('sqrt')
result = r.execute().fetch()
expected = s_raw.apply('sqrt')
pd.testing.assert_series_equal(result, expected)
r = series.apply(lambda x: [x, x + 1], convert_dtype=False)
result = r.execute().fetch()
expected = s_raw.apply(lambda x: [x, x + 1], convert_dtype=False)
pd.testing.assert_series_equal(result, expected)
s_raw2 = pd.Series([np.array([1, 2, 3]), np.array([4, 5, 6])])
series = from_pandas_series(s_raw2)
dtypes = pd.Series([np.dtype(float)] * 3)
r = series.apply(pd.Series, output_type='dataframe',
dtypes=dtypes)
result = r.execute().fetch()
expected = s_raw2.apply(pd.Series)
pd.testing.assert_frame_equal(result, expected)
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_apply_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.apply(lambda row: str(row[0]) + row[1], axis=1)
result = r.execute().fetch()
expected = df1.apply(lambda row: str(row[0]) + row[1], axis=1)
pd.testing.assert_series_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.apply(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.apply(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_transform_execute(setup):
cols = [chr(ord('A') + i) for i in range(10)]
df_raw = pd.DataFrame(dict((c, [i ** 2 for i in range(20)]) for c in cols))
idx_vals = [chr(ord('A') + i) for i in range(20)]
s_raw = pd.Series([i ** 2 for i in range(20)], index=idx_vals)
def rename_fn(f, new_name):
f.__name__ = new_name
return f
old_chunk_store_limit = options.chunk_store_limit
try:
options.chunk_store_limit = 20
# DATAFRAME CASES
df = from_pandas_df(df_raw, chunk_size=5)
# test transform scenarios on data frames
r = df.transform(lambda x: list(range(len(x))))
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))))
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: list(range(len(x))), axis=1)
result = r.execute().fetch()
expected = df_raw.transform(lambda x: list(range(len(x))), axis=1)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(['cumsum', 'cummax', lambda x: x + 1])
result = r.execute().fetch()
expected = df_raw.transform(['cumsum', 'cummax', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
fn_dict = OrderedDict([
('A', 'cumsum'),
('D', ['cumsum', 'cummax']),
('F', lambda x: x + 1),
])
r = df.transform(fn_dict)
result = r.execute().fetch()
expected = df_raw.transform(fn_dict)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1])
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.iloc[:-1], axis=1, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.iloc[:-1], axis=1)
pd.testing.assert_frame_equal(result, expected)
fn_list = [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]
r = df.transform(fn_list, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_list)
pd.testing.assert_frame_equal(result, expected)
r = df.transform(lambda x: x.sum(), _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(lambda x: x.sum())
pd.testing.assert_series_equal(result, expected)
fn_dict = OrderedDict([
('A', rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1')),
('D', [rename_fn(lambda x: x.iloc[1:].reset_index(drop=True), 'f1'),
lambda x: x.iloc[:-1].reset_index(drop=True)]),
('F', lambda x: x.iloc[:-1].reset_index(drop=True)),
])
r = df.transform(fn_dict, _call_agg=True)
result = r.execute().fetch()
expected = df_raw.agg(fn_dict)
pd.testing.assert_frame_equal(result, expected)
# SERIES CASES
series = from_pandas_series(s_raw, chunk_size=5)
# test transform scenarios on series
r = series.transform(lambda x: x + 1)
result = r.execute().fetch()
expected = s_raw.transform(lambda x: x + 1)
pd.testing.assert_series_equal(result, expected)
r = series.transform(['cumsum', lambda x: x + 1])
result = r.execute().fetch()
expected = s_raw.transform(['cumsum', lambda x: x + 1])
pd.testing.assert_frame_equal(result, expected)
# test transform on string dtype
df_raw = pd.DataFrame({'col1': ['str'] * 10, 'col2': ['string'] * 10})
df = from_pandas_df(df_raw, chunk_size=3)
r = df['col1'].transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw['col1'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
r = df.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = df_raw.transform(lambda x: x + '_suffix')
pd.testing.assert_frame_equal(result, expected)
r = df['col2'].transform(lambda x: x + '_suffix', dtype=np.dtype('str'))
result = r.execute().fetch()
expected = df_raw['col2'].transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
finally:
options.chunk_store_limit = old_chunk_store_limit
@pytest.mark.skipif(pa is None, reason='pyarrow not installed')
def test_transform_with_arrow_dtype_execution(setup):
df1 = pd.DataFrame({'a': [1, 2, 1],
'b': ['a', 'b', 'a']})
df = from_pandas_df(df1)
df['b'] = df['b'].astype('Arrow[string]')
r = df.transform({'b': lambda x: x + '_suffix'})
result = r.execute().fetch()
expected = df1.transform({'b': lambda x: x + '_suffix'})
pd.testing.assert_frame_equal(result, expected)
s1 = df1['b']
s = from_pandas_series(s1)
s = s.astype('arrow_string')
r = s.transform(lambda x: x + '_suffix')
result = r.execute().fetch()
expected = s1.transform(lambda x: x + '_suffix')
pd.testing.assert_series_equal(result, expected)
def test_string_method_execution(setup):
s = pd.Series(['s1,s2', 'ef,', 'dd', np.nan])
s2 = pd.concat([s, s, s])
series = from_pandas_series(s, chunk_size=2)
series2 = from_pandas_series(s2, chunk_size=2)
# test getitem
r = series.str[:3]
result = r.execute().fetch()
expected = s.str[:3]
pd.testing.assert_series_equal(result, expected)
# test split, expand=False
r = series.str.split(',', n=2)
result = r.execute().fetch()
expected = s.str.split(',', n=2)
pd.testing.assert_series_equal(result, expected)
# test split, expand=True
r = series.str.split(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.split(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test rsplit
r = series.str.rsplit(',', expand=True, n=1)
result = r.execute().fetch()
expected = s.str.rsplit(',', expand=True, n=1)
pd.testing.assert_frame_equal(result, expected)
# test cat all data
r = series2.str.cat(sep='/', na_rep='e')
result = r.execute().fetch()
expected = s2.str.cat(sep='/', na_rep='e')
assert result == expected
# test cat list
r = series.str.cat(['a', 'b', np.nan, 'c'])
result = r.execute().fetch()
expected = s.str.cat(['a', 'b', np.nan, 'c'])
pd.testing.assert_series_equal(result, expected)
# test cat series
r = series.str.cat(series.str.capitalize(), join='outer')
result = r.execute().fetch()
expected = s.str.cat(s.str.capitalize(), join='outer')
pd.testing.assert_series_equal(result, expected)
# test extractall
r = series.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
result = r.execute().fetch()
expected = s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
pd.testing.assert_frame_equal(result, expected)
# test extract, expand=False
r = series.str.extract(r'[ab](\d)', expand=False)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=False)
pd.testing.assert_series_equal(result, expected)
# test extract, expand=True
r = series.str.extract(r'[ab](\d)', expand=True)
result = r.execute().fetch()
expected = s.str.extract(r'[ab](\d)', expand=True)
pd.testing.assert_frame_equal(result, expected)
def test_datetime_method_execution(setup):
# test datetime
s = pd.Series([pd.Timestamp('2020-1-1'),
pd.Timestamp('2020-2-1'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.year
result = r.execute().fetch()
expected = s.dt.year
pd.testing.assert_series_equal(result, expected)
r = series.dt.strftime('%m-%d-%Y')
result = r.execute().fetch()
expected = s.dt.strftime('%m-%d-%Y')
pd.testing.assert_series_equal(result, expected)
# test timedelta
s = pd.Series([pd.Timedelta('1 days'),
pd.Timedelta('3 days'),
np.nan])
series = from_pandas_series(s, chunk_size=2)
r = series.dt.days
result = r.execute().fetch()
expected = s.dt.days
pd.testing.assert_series_equal(result, expected)
def test_isin_execution(setup):
# one chunk in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=10)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
    # multiple chunks in one chunk
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=4)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
    # multiple chunks in multiple chunks
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = from_pandas_series(b, chunk_size=2)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = pd.Series([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
    result = sa.isin(b).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = np.array([2, 1, 9, 3])
sa = from_pandas_series(a, chunk_size=2)
sb = tensor(b, chunk_size=3)
result = sa.isin(sb).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
a = pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
b = {2, 1, 9, 3} # set
sa = from_pandas_series(a, chunk_size=2)
    result = sa.isin(b).execute().fetch()
expected = a.isin(b)
pd.testing.assert_series_equal(result, expected)
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 3)))
df = from_pandas_df(raw, chunk_size=(5, 2))
# set
b = {2, 1, raw[1][0]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin(b)
pd.testing.assert_frame_equal(result, expected)
# mars object
b = tensor([2, 1, raw[1][0]], chunk_size=2)
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin([2, 1, raw[1][0]])
pd.testing.assert_frame_equal(result, expected)
# dict
b = {1: tensor([2, 1, raw[1][0]], chunk_size=2),
2: [3, 10]}
r = df.isin(b)
result = r.execute().fetch()
expected = raw.isin({1: [2, 1, raw[1][0]], 2: [3, 10]})
pd.testing.assert_frame_equal(result, expected)
def test_cut_execution(setup):
session = setup
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
bins = [10, 100, 500]
ii = pd.interval_range(10, 500, 3)
labels = ['a', 'b']
t = tensor(raw, chunk_size=4)
series = from_pandas_series(s, chunk_size=4)
iii = from_pandas_index(ii, chunk_size=2)
# cut on Series
r = cut(series, bins)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins))
r, b = cut(series, bins, retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# cut on tensor
r = cut(t, bins)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# one chunk
r = cut(s, tensor(bins, chunk_size=2), right=False, include_lowest=True)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, bins, right=False, include_lowest=True))
# test labels
r = cut(t, bins, labels=labels)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
r = cut(t, bins, labels=False)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=False)
np.testing.assert_array_equal(result, expected)
# test labels which is tensor
labels_t = tensor(['a', 'b'], chunk_size=1)
r = cut(raw, bins, labels=labels_t, include_lowest=True)
# result and expected is array whose dtype is CategoricalDtype
result = r.execute().fetch()
expected = pd.cut(raw, bins, labels=labels, include_lowest=True)
assert len(result) == len(expected)
for r, e in zip(result, expected):
np.testing.assert_equal(r, e)
# test labels=False
r, b = cut(raw, ii, labels=False, retbins=True)
# result and expected is array whose dtype is CategoricalDtype
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(raw, ii, labels=False, retbins=True)
for r, e in zip(r_result, r_expected):
np.testing.assert_equal(r, e)
pd.testing.assert_index_equal(b_result, b_expected)
# test bins which is md.IntervalIndex
r, b = cut(series, iii, labels=tensor(labels, chunk_size=1), retbins=True)
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, ii, labels=labels, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
pd.testing.assert_index_equal(b_result, b_expected)
# test duplicates
bins2 = [0, 2, 4, 6, 10, 10]
r, b = cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
r_result = r.execute().fetch()
b_result = b.execute().fetch()
r_expected, b_expected = pd.cut(s, bins2, labels=False, retbins=True,
right=False, duplicates='drop')
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test integer bins
r = cut(series, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s, 3))
r, b = cut(series, 3, right=False, retbins=True)
r_result, b_result = session.fetch(*session.execute(r, b))
r_expected, b_expected = pd.cut(s, 3, right=False, retbins=True)
pd.testing.assert_series_equal(r_result, r_expected)
np.testing.assert_array_equal(b_result, b_expected)
# test min max same
s2 = pd.Series([1.1] * 15)
r = cut(s2, 3)
result = r.execute().fetch()
pd.testing.assert_series_equal(result, pd.cut(s2, 3))
# test inf exist
s3 = s2.copy()
s3[-1] = np.inf
with pytest.raises(ValueError):
cut(s3, 3).execute()
def test_transpose_execution(setup):
raw = pd.DataFrame({"a": ['1', '2', '3'], "b": ['5', '-6', '7'], "c": ['1', '2', '3']})
# test 1 chunk
df = from_pandas_df(raw)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# test multi chunks
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = from_pandas_df(raw, chunk_size=2)
result = df.T.execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# dtypes are varied
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": [5, -6, 7], "c": [1, 2, 3]})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
raw = pd.DataFrame({"a": [1.1, 2.2, 3.3], "b": ['5', '-6', '7']})
df = from_pandas_df(raw, chunk_size=2)
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
# Transposing from results of other operands
raw = pd.DataFrame(np.arange(0, 100).reshape(10, 10))
df = DataFrame(arange(0, 100, chunk_size=5).reshape(10, 10))
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
df = DataFrame(rand(100, 100, chunk_size=10))
raw = df.to_pandas()
result = df.transpose().execute().fetch()
pd.testing.assert_frame_equal(result, raw.transpose())
def test_to_numeric_execition(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100))
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test multi chunks
series = from_pandas_series(s, chunk_size=20)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test object dtype
s = pd.Series(['1.0', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series)
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s))
# test errors and downcast
s = pd.Series(['appple', 2, -3, '2.0'])
series = from_pandas_series(s)
r = to_numeric(series, errors='ignore', downcast='signed')
pd.testing.assert_series_equal(r.execute().fetch(),
pd.to_numeric(s, errors='ignore', downcast='signed'))
# test list data
l = ['1.0', 2, -3, '2.0']
r = to_numeric(l)
np.testing.assert_array_equal(r.execute().fetch(),
pd.to_numeric(l))
def test_q_cut_execution(setup):
rs = np.random.RandomState(0)
raw = rs.random(15) * 1000
s = pd.Series(raw, index=[f'i{i}' for i in range(15)])
series = from_pandas_series(s)
r = qcut(series, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
r = qcut(s, 3)
result = r.execute().fetch()
expected = pd.qcut(s, 3)
pd.testing.assert_series_equal(result, expected)
series = from_pandas_series(s)
r = qcut(series, [0.3, 0.5, 0.7])
result = r.execute().fetch()
expected = pd.qcut(s, [0.3, 0.5, 0.7])
pd.testing.assert_series_equal(result, expected)
r = qcut(range(5), 3)
result = r.execute().fetch()
expected = pd.qcut(range(5), 3)
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), [0.2, 0.5])
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
r = qcut(range(5), tensor([0.2, 0.5]))
result = r.execute().fetch()
expected = pd.qcut(range(5), [0.2, 0.5])
assert isinstance(result, type(expected))
pd.testing.assert_series_equal(pd.Series(result),
pd.Series(expected))
def test_shift_execution(setup):
# test dataframe
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
df = from_pandas_df(raw, chunk_size=5)
for periods in (2, -2, 6, -6):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df.shift(periods=periods, axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw.shift(periods=periods, axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected, check_dtype=False)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}'
) from e
raw2 = raw.copy()
raw2.index = pd.date_range('2020-1-1', periods=10)
raw2.columns = pd.date_range('2020-3-1', periods=8)
df2 = from_pandas_df(raw2, chunk_size=5)
# test freq not None
for periods in (2, -2):
for axis in (0, 1):
for fill_value in (None, 0, 1.):
r = df2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
try:
result = r.execute().fetch()
expected = raw2.shift(periods=periods, freq='D', axis=axis,
fill_value=fill_value)
pd.testing.assert_frame_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, axis: {axis}, fill_value: {fill_value}') from e
# test tshift
r = df2.tshift(periods=1)
result = r.execute().fetch()
expected = raw2.tshift(periods=1)
pd.testing.assert_frame_equal(result, expected)
with pytest.raises(ValueError):
_ = df.tshift(periods=1)
# test series
s = raw.iloc[:, 0]
series = from_pandas_series(s, chunk_size=5)
for periods in (0, 2, -2, 6, -6):
for fill_value in (None, 0, 1.):
r = series.shift(periods=periods, fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s.shift(periods=periods, fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
s2 = raw2.iloc[:, 0]
# test freq not None
series2 = from_pandas_series(s2, chunk_size=5)
for periods in (2, -2):
for fill_value in (None, 0, 1.):
r = series2.shift(periods=periods, freq='D', fill_value=fill_value)
try:
result = r.execute().fetch()
expected = s2.shift(periods=periods, freq='D', fill_value=fill_value)
pd.testing.assert_series_equal(result, expected)
except AssertionError as e: # pragma: no cover
raise AssertionError(
f'Failed when periods: {periods}, fill_value: {fill_value}') from e
def test_diff_execution(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(10, 8)),
columns=['col' + str(i + 1) for i in range(8)])
raw1 = raw.copy()
raw1['col4'] = raw1['col4'] < 400
r = from_pandas_df(raw1, chunk_size=(10, 5)).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw1, chunk_size=5).diff(-1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw1.diff(-1))
r = from_pandas_df(raw, chunk_size=(5, 8)).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1))
r = from_pandas_df(raw, chunk_size=5).diff(1, axis=1)
pd.testing.assert_frame_equal(r.execute().fetch(),
raw.diff(1, axis=1), check_dtype=False)
# test series
s = raw.iloc[:, 0]
s1 = s.copy() < 400
r = from_pandas_series(s, chunk_size=10).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s, chunk_size=5).diff(-1)
pd.testing.assert_series_equal(r.execute().fetch(),
s.diff(-1))
r = from_pandas_series(s1, chunk_size=5).diff(1)
pd.testing.assert_series_equal(r.execute().fetch(),
s1.diff(1))
def test_value_counts_execution(setup):
rs = np.random.RandomState(0)
s = pd.Series(rs.randint(5, size=100), name='s')
s[rs.randint(100)] = np.nan
# test 1 chunk
series = from_pandas_series(s, chunk_size=100)
r = series.value_counts()
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
# test multi chunks
series = from_pandas_series(s, chunk_size=30)
r = series.value_counts(method='tree')
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts())
r = series.value_counts(method='tree', normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(normalize=True))
# test bins and normalize
r = series.value_counts(method='tree', bins=5, normalize=True)
pd.testing.assert_series_equal(r.execute().fetch(),
s.value_counts(bins=5, normalize=True))
def test_astype(setup):
rs = np.random.RandomState(0)
raw = pd.DataFrame(rs.randint(1000, size=(20, 8)),
columns=['c' + str(i + 1) for i in range(8)])
# single chunk
df = from_pandas_df(raw)
r = df.astype('int32')
result = r.execute().fetch()
expected = raw.astype('int32')
| pd.testing.assert_frame_equal(expected, result) | pandas.testing.assert_frame_equal |
import pandas as pd
from SALib.analyze.radial_ee import analyze as ee_analyze
from SALib.analyze.sobol_jansen import analyze as jansen_analyze
from SALib.plotting.bar import plot as barplot
# results produced with
# python launch.py --specific_inputs oat_mc_10_samples.csv --num_cores 48
# python launch.py --specific_inputs oat_cim_extremes.csv --num_cores 2
# python launch.py --specific_inputs moat_10_samples.csv --num_cores 46
from .settings import *
data_dir = indir
problem = {
'num_vars': 53,
'names': ['Farm___Crops___variables___Dryland_Winter_Barley___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Barley___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Barley___yield_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Canola___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Canola___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Canola___yield_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Wheat___root_depth_m',
'Farm___Crops___variables___Dryland_Winter_Wheat___water_use_ML_per_Ha',
'Farm___Crops___variables___Dryland_Winter_Wheat___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Barley___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Barley___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Barley___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Canola___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Canola___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Canola___yield_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Wheat___root_depth_m',
'Farm___Crops___variables___Irrigated_Winter_Wheat___water_use_ML_per_Ha',
'Farm___Crops___variables___Irrigated_Winter_Wheat___yield_per_Ha',
'Farm___Fields___soil___zone_10___TAW_mm',
'Farm___Fields___soil___zone_11___TAW_mm',
'Farm___Fields___soil___zone_12___TAW_mm',
'Farm___Fields___soil___zone_1___TAW_mm',
'Farm___Fields___soil___zone_2___TAW_mm',
'Farm___Fields___soil___zone_3___TAW_mm',
'Farm___Fields___soil___zone_4___TAW_mm',
'Farm___Fields___soil___zone_5___TAW_mm',
'Farm___Fields___soil___zone_6___TAW_mm',
'Farm___Fields___soil___zone_7___TAW_mm',
'Farm___Fields___soil___zone_8___TAW_mm',
'Farm___Fields___soil___zone_9___TAW_mm',
'Farm___Irrigations___Gravity___cost_per_Ha',
'Farm___Irrigations___Gravity___head_pressure',
'Farm___Irrigations___Gravity___irrigation_efficiency',
'Farm___Irrigations___Gravity___pumping_cost_per_ML',
'Farm___Irrigations___PipeAndRiser___cost_per_Ha',
'Farm___Irrigations___PipeAndRiser___head_pressure',
'Farm___Irrigations___PipeAndRiser___irrigation_efficiency',
'Farm___Irrigations___PipeAndRiser___pumping_cost_per_ML',
'Farm___Irrigations___Spray___cost_per_Ha',
'Farm___Irrigations___Spray___head_pressure',
'Farm___Irrigations___Spray___irrigation_efficiency',
'Farm___Irrigations___Spray___pumping_cost_per_ML',
'Farm___zone_10___Irrigation', 'Farm___zone_11___Irrigation',
'Farm___zone_2___Irrigation', 'Farm___zone_4___Irrigation',
'Farm___zone_6___Irrigation', 'Farm___zone_7___Irrigation',
'Farm___zone_8___Irrigation', 'Farm___zone_9___Irrigation',
'policy___goulburn_allocation_scenario', 'policy___gw_cap',
'policy___gw_restriction'],
'bounds': [(0.80008164104, 1.49988829764),
(1.50055050742, 2.99888102069),
(1.5019032420200003, 3.4997506932099998),
(0.800586478968, 1.4996985073),
(2.50048002895, 5.9984797603299995),
(0.801052350325, 2.59824297051),
(0.800504246618, 1.49975544648),
(2.5014981435299997, 5.9979681912),
(1.5004709810799999, 5.99716646463),
(0.800280272497, 1.49937425734),
(1.5009590614, 2.9992559947000004),
(2.50329796931, 6.996816011819999),
(0.800211596215, 1.49974890273),
(2.0025975557, 5.99742468979),
(1.3008100600299999, 4.99958661017),
(0.8000586077680001, 1.7993585851400002),
(2.50005748529, 5.99920182664),
(1.5021921746899998, 7.99719295089),
(150.013080285, 199.99630294),
(145.01266211, 184.97447762599998),
(145.036691741, 184.96132256099997),
(145.017973816, 184.964659778),
(145.009985077, 184.987775366),
(100.017759932, 159.950281059),
(100.00893349, 159.939807798),
(150.002663759, 199.995911171),
(150.049539279, 199.966206716),
(75.011883698, 109.982509833),
(100.007801344, 159.986958043),
(145.015806747, 184.983072651),
(2000.04766978, 2499.9660698000002),
(8.00489093285, 14.999582054100001),
(0.500092622216, 0.8998440697460001),
(8.0072724319, 14.9995752798),
(2000.65212205, 3299.41488388),
(8.00365090987, 14.9983740134),
(0.600018657025, 0.899703908987),
(8.005434387660001, 14.9933485659),
(2500.62094903, 3499.76177012),
(25.0039236705, 34.9957834096),
(0.7001056060199999, 0.8998137827079999),
(30.000316497100002, 59.9914045149),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 2.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 1.0),
(0.0, 2.0),
(0.0, 2.0),
(0.600156362739, 0.999676343195),
(0.0, 1.0)]
}
def collect_results(problem, oat_length, reps, np_res, numeric_vals):
jansen_results_df = pd.DataFrame()
ee_results_df = pd.DataFrame()
rep_length = oat_length * reps
_, cols = np_res.shape
for col in range(cols):
cn = col_names[col]
res = np_res[:rep_length, col]
si = jansen_analyze(problem, res, reps, seed=101)
js_df = si.to_df()
js_df.columns = ['{}_{}'.format(cn, suf) for suf in js_df.columns]
jansen_results_df = pd.concat([jansen_results_df, js_df], axis=1)
si = ee_analyze(problem, numeric_vals[:rep_length],
res, reps, seed=101)
ee_df = si.to_df()
ee_df.columns = ['{}_{}'.format(cn, suf) for suf in ee_df.columns]
ee_results_df = pd.concat([ee_results_df, ee_df], axis=1)
return jansen_results_df, ee_results_df
# End collect_results()
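# Illustrative usage of collect_results (a sketch, not part of the original run):
# it relies on the module-level `col_names` defined further below and expects `np_res`
# (model outputs, one column per objective) plus the matching numeric parameter samples.
# The results file name here is hypothetical.
# oat_res = pd.read_csv(f'{data_dir}oat_results.csv', index_col=0)
# col_names = oat_res.columns
# jansen_df, ee_df = collect_results(problem, oat_length, reps=10,
#                                    np_res=oat_res.values,
#                                    numeric_vals=numeric_samples.values)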
def plot_results(jansen_results_df, ee_results_df, target_metric):
# STs = [c for c in jansen_results_df.columns if '_conf' not in c and target_metric in c]
idx = [True if 'irrigation' in r.lower() else False for r in jansen_results_df.index]
# ax = jansen_results_df.loc[idx, STs].plot(kind='bar', figsize=(10,6))
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
tgt_idx = [c for c in ee_results_df.columns if target_metric.lower() in c.lower()]
ax = ee_results_df.loc[idx, tgt_idx].plot(kind='bar', figsize=(10,6))
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# End plot_results()
template_df = pd.read_csv(f'{data_dir}example_sample.csv', index_col=0)
is_perturbed = (template_df != template_df.iloc[0]).any()
perturbed_cols = template_df.loc[:, is_perturbed].columns
target_num_vars = problem['num_vars']
oat_length = target_num_vars + 1
target_metric = "SW Allocation Index"
### Extreme values without interactions ###
numeric_samples = pd.read_csv(f'{data_dir}extreme_numeric_samples.csv', index_col=0)
numeric_samples = numeric_samples[perturbed_cols]
numeric_vals = numeric_samples.values
extreme_results = pd.read_csv(f'{data_dir}no_irrigation_extreme_results.csv', index_col=0)
np_res = extreme_results.values
col_names = extreme_results.columns
extreme_results = {}
for i in range(len(col_names)):
x_diff = (numeric_vals[0, :] - numeric_vals[1, :])
y_diff = (np_res[0, i] - np_res[1, i])
extreme_results[col_names[i]] = y_diff / x_diff
# End for
no_ext_results = pd.DataFrame(extreme_results, index=perturbed_cols).T
no_ext_results.columns = [c.replace('Farm___Irrigations___', '') for c in no_ext_results.columns]
tgt_cols = [c for c in no_ext_results.columns if 'gravity___irrigation_efficiency' in c.lower()]
# no_ext_results.loc[tgt_idx, tgt_cols].plot(kind='bar', legend=None)
### Extremes with interactions ###
extreme_results = | pd.read_csv(f'{data_dir}with_irrigation_extreme_results.csv', index_col=0) | pandas.read_csv |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/04-model-suite.ipynb (unless otherwise specified).
__all__ = ['ScikitModel', 'create_train_test_indexes', 'calculate_error_metrics', 'calc_month_error_metrics',
'construct_prediction_df', 'ModelSuite', 'load_module_attr', 'run_parameterised_model', 'plot_obsv_v_pred',
'create_residual_bin_avgs_s', 'plot_residual_bin_avgs', 'plot_pred_sample', 'flatten_list',
'plot_residuals_dist', 'visualise_errors', 'save_params', 'load_params']
# Cell
import yaml
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold, train_test_split
from wpdhack import data, feature
from tqdm import tqdm
from random import randint
from typing import Protocol
from importlib import import_module
import seaborn as sns
import matplotlib.pyplot as plt
# Cell
class ScikitModel(Protocol):
def fit(self, X, y, sample_weight=None): ...
def predict(self, X): ...
def score(self, X, y, sample_weight=None): ...
def set_params(self, **params): ...
def create_train_test_indexes(X, **split_kwargs):
if 'n_splits' not in split_kwargs.keys():
assert 'test_size' in split_kwargs.keys(), 'You must provide either `n_splits` or `test_size` within the `split_kwargs` parameters'
size = X.shape[0]
index_array = np.linspace(0, size-1, size).astype(int)
train_test_indexes = [tuple(train_test_split(index_array, **split_kwargs))]
else:
kf = KFold(**split_kwargs)
train_test_indexes = kf.split(X)
return train_test_indexes
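# Usage sketch (illustrative): the helper returns a single (train, test) index pair in a list
# when `test_size` is given, or a KFold split generator when `n_splits` is given, e.g.
# create_train_test_indexes(X, test_size=0.1, shuffle=False)
# create_train_test_indexes(X, n_splits=5, shuffle=True)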
def calculate_error_metrics(y1, y2, y1_pred, y2_pred, y_baseline):
baseline_combined_rmse = np.sqrt(np.square(y_baseline - y1).sum() + np.square(y_baseline - y2).sum())
combined_rmse = np.sqrt(np.square(y1_pred - y1).sum() + np.square(y2_pred - y2).sum())
error_metrics = {
'y1_rmse': np.sqrt(np.square(y1_pred - y1).sum()),
'y2_rmse': np.sqrt(np.square(y2_pred - y2).sum()),
'combined_rmse': combined_rmse,
'skill_score': combined_rmse/baseline_combined_rmse
}
return error_metrics
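# Note: the *_rmse values above are root-sum-of-squared errors over the evaluated chunk
# (no division by n), and skill_score < 1 means the model beats the `value_mean` baseline.
# Worked example with made-up numbers:
# calculate_error_metrics(np.array([2.0]), np.array([0.0]),
#                         np.array([1.5]), np.array([0.5]), np.array([1.0]))
# -> combined_rmse ~= 0.707, baseline ~= 1.414, skill_score ~= 0.5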
def calc_month_error_metrics(
df_pred,
df_target,
month: str='2021-08'
):
common_idxs = pd.DatetimeIndex(sorted(list(set(df_pred.index.intersection(df_target.index.tz_convert(None))))))
df_pred, df_target = df_pred.loc[common_idxs], df_target.loc[common_idxs.tz_localize('UTC')]
df_pred, df_target = df_pred.drop_duplicates(), df_target.drop_duplicates()
error_metrics = calculate_error_metrics(
df_target.loc[month, 'value_max'].values,
df_target.loc[month, 'value_min'].values,
df_pred.loc[month, 'value_max'].values,
df_pred.loc[month, 'value_min'].values,
df_pred.loc[month, 'value_mean'].values
)
return error_metrics
def construct_prediction_df(
y1_pred: np.ndarray,
y2_pred: np.ndarray,
index: pd.Index,
df_features: pd.DataFrame
):
df_pred = pd.DataFrame({'value_max': y1_pred, 'value_min': y2_pred}, index=index)
df_pred.index.name = 'time'
df_pred.index = index.tz_convert(None)
# handling invalid values
invalid_max_idxs = df_pred.index[~(df_features['value'].values < df_pred['value_max'].values)]
invalid_min_idxs = df_pred.index[~(df_features['value'].values > df_pred['value_min'].values)]
if len(invalid_max_idxs) > 0:
df_pred.loc[invalid_max_idxs, 'value_max'] = df_features.loc[invalid_max_idxs.tz_localize('UTC'), 'value'].values
if len(invalid_min_idxs) > 0:
df_pred.loc[invalid_min_idxs, 'value_min'] = df_features.loc[invalid_min_idxs.tz_localize('UTC'), 'value'].values
# final checks
assert df_pred.isnull().sum().sum() == 0, 'There should be no NaN values in the predictions'
return df_pred
class ModelSuite:
def __init__(
self,
model_1: ScikitModel=RandomForestRegressor(),
model_2: ScikitModel=RandomForestRegressor(),
):
self.set_models(model_1, model_2)
return
def set_models(
self,
model_1: ScikitModel=RandomForestRegressor(),
model_2: ScikitModel=RandomForestRegressor(),
trained: bool=False
):
self.model_1 = model_1
self.model_2 = model_2
self.trained = trained
return
def fit_models(
self,
X: np.ndarray,
y1: np.ndarray,
y2: np.ndarray,
shuffle=True,
):
if shuffle == True:
shuffler = np.random.permutation(X.shape[0])
X, y1, y2 = X[shuffler], y1[shuffler], y2[shuffler]
if self.model_2 is not None:
self.model_1.fit(X, y1)
self.model_2.fit(X, y2)
else:
Y = np.column_stack([y1, y2])
self.model_1.fit(X, Y)
self.trained = True
return
def predict_models(
self,
X: np.ndarray
):
if self.model_2 is not None:
y1_pred = self.model_1.predict(X)
y2_pred = self.model_2.predict(X)
else:
Y_pred = self.model_1.predict(X)
y1_pred, y2_pred = Y_pred[:, 0], Y_pred[:, 1]
return y1_pred, y2_pred
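    # Usage sketch (illustrative): fit one model per target, then predict on new features.
    # suite = ModelSuite()
    # suite.fit_models(X_train, y1_train, y2_train)
    # y1_hat, y2_hat = suite.predict_models(X_test)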
def run_test(
self,
df_target: pd.DataFrame,
df_features: pd.DataFrame,
y1_col: str='value_max',
y2_col: str='value_min',
split_kwargs: dict={
'test_size': 0.1,
'shuffle': False
},
use_target_delta: bool=False,
fit_shuffle: bool=True
):
X = df_features.values
y1, y2 = df_target[y1_col].values, df_target[y2_col].values
y_baseline = df_features['value'].values
error_metrics = []
df_pred = pd.DataFrame()
train_test_indexes = create_train_test_indexes(X, **split_kwargs)
for train_index, test_index in train_test_indexes:
X_train, X_test, y1_train, y1_test, y2_train, y2_test, y_baseline_train, y_baseline_test = X[train_index], X[test_index], y1[train_index], y1[test_index], y2[train_index], y2[test_index], y_baseline[train_index], y_baseline[test_index]
self.fit_models(X_train, y1_train, y2_train, shuffle=fit_shuffle)
y1_pred, y2_pred = self.predict_models(X_test)
if use_target_delta == True:
y1_pred, y2_pred, y1_test, y2_test = y1_pred+y_baseline_test, y2_pred+y_baseline_test, y1_test+y_baseline_test, y2_test+y_baseline_test
df_pred = df_pred.append(construct_prediction_df(y1_pred, y2_pred, df_features.index[test_index], df_features.iloc[test_index]).assign(value_mean=y_baseline_test))
error_metrics += [calculate_error_metrics(y1_test, y2_test, y1_pred, y2_pred, y_baseline_test)]
df_pred = df_pred.sort_index()
avg_error_metrics = | pd.DataFrame(error_metrics) | pandas.DataFrame |
# This script is part of the supporting information to the manuscript entitled
# "Assessing the Calibration in Toxicological in Vitro Models with Conformal Prediction".
# The script was developed by <NAME> in the In Silico Toxicology and Structural Biology Group of
# Prof. Dr. <NAME> at the Charité Universitätsmedizin Berlin, in collaboration with
# <NAME>, <NAME>, <NAME>, <NAME> and <NAME>.
# It was last updated in December 2020.
import pandas as pd
import numpy as np
import random
import os
import math
import copy
import matplotlib.pyplot as plt
import scipy
import scipy.sparse  # needed for scipy.sparse.coo_matrix in combine_csr
from sklearn.datasets import load_svmlight_files
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from nonconformist.icp import IcpClassifier
import logging
logger = logging.getLogger(__name__)
# -------------------------------------------------------------------
# Load/handle signatures
# -------------------------------------------------------------------
def define_path(endpoint, data, signatures_path):
"""
Define the path where the signatures are stored.
As they were all created with cpsign, the path is similar
"""
path = os.path.join(
signatures_path, f"models_{endpoint}_{data}/sparse_data/data.csr"
)
return path
def load_signatures_files(path1, path2, path3):
"""
Load signatures from multiple .csr files (for multiple datasets)
This has the advantage, that the length of the signatures is automatically padded
Parameters
----------
path1 : Path to dataset 1, e.g. Tox21train
path2 : Path to dataset 2, e.g. Tox21test
path3 : Path to dataset 3, e.g. Tox21score
Returns
-------
X and y arrays for the three datasets
"""
# fixme: this function might be adapted to accept any number of paths
X1, y1, X2, y2, X3, y3 = load_svmlight_files([path1, path2, path3])
return X1, y1, X2, y2, X3, y3
def combine_csr(X1, y1, X2, y2):
"""
A function that combines two sparse matrices (signatures and labels). This is e.g. used for train_update in CPTox21
"""
X1_coo = X1.tocoo()
X2_coo = X2.tocoo()
len_X1 = X1_coo.shape[0]
X2_coo.row = np.array([i + len_X1 for i in X2_coo.row])
    # use numpy's concatenate; the top-level scipy.concatenate alias has been removed from recent SciPy releases
    coo_data = np.concatenate((X1_coo.data, X2_coo.data))
    coo_rows = np.concatenate((X1_coo.row, X2_coo.row))
    coo_cols = np.concatenate((X1_coo.col, X2_coo.col))
X_comb_coo = scipy.sparse.coo_matrix(
(coo_data, (coo_rows, coo_cols)),
shape=(X1_coo.shape[0] + X2_coo.shape[0], X1_coo.shape[1]),
)
X_comb = X_comb_coo.tocsr()
y_comb = np.append(y1, y2, axis=0)
return X_comb, y_comb
# --------------------------------
# Samplers
# --------------------------------
class Sampler:
"""
Basic 'sampler' class, to generate samples/subsets for the different conformal prediction steps
"""
def _gen_samples(self, y):
raise NotImplementedError("Implement in your subclass")
pass
def gen_samples(self, labels):
"""
Parameters
----------
labels : pd.Series
a series of labels for the molecules
Returns
-------
"""
y = labels
return self._gen_samples(y)
@staticmethod
def _balance(y_idx, idx, ratio=1.0):
# Mask to distinguish compounds of inactive and active class of dataset
mask_0 = y_idx == 0
y_0 = idx[mask_0]
mask_1 = y_idx == 1
y_1 = idx[mask_1]
# Define which class corresponds to larger proper training set and is subject to undersampling
larger = y_0 if y_0.size > y_1.size else y_1
smaller = y_1 if y_0.size > y_1.size else y_0
# Subsample larger class until same number of instances as for smaller class is reached
while smaller.size < larger.size / ratio:
k = np.random.choice(range(larger.size))
larger = np.delete(larger, k)
idx = sorted(np.append(larger, smaller))
assert len(idx) == 2 * len(smaller)
return idx
@property
def name(self):
raise NotImplementedError("Implement in your subclass")
class CrossValidationSampler(Sampler):
"""
This is a sampler to be used for crossvalidation or cross-conformal predictors (not implemented yet)
Parameters
----------
n_folds : int
Number of folds. Must be at least 2
Attributes
----------
n_folds : int
Number of folds. Must be at least 2
Examples
--------
todo
"""
def __init__(self, n_folds=5, random_state=None):
self.n_folds = n_folds
self.random_state = random_state
def _gen_samples(self, y):
folds = StratifiedKFold(n_splits=self.n_folds, random_state=self.random_state, shuffle=True)
for i, (train, test) in enumerate(folds.split(X=np.zeros(len(y)), y=y)):
# i specifies the fold of the crossvalidation, i.e. between 0 and 4
yield i, train, test
@property
def name(self):
return self.__repr__()
def __repr__(self):
return f"<{self.__class__.__name__} with {self.n_folds} folds>"
class StratifiedRatioSampler(Sampler):
"""
This sampler can e.g. be used for aggregated conformal predictors
Parameters
----------
test_ratio : float
Should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split
Note: according to sklearn, test_ratio could also be int or None.
n_folds : int
Number of re-shuffling and splitting iterations.
Attributes
----------
test_ratio : float
Should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split
Note: according to sklearn, test_ratio could also be int or None.
n_folds : int
Number of re-shuffling and splitting iterations.
Examples
--------
todo
"""
def __init__(self, test_ratio=0.3, n_folds=1, random_state=None):
self.test_ratio = test_ratio
self.n_folds = n_folds
self.random_state = random_state
def _gen_samples(self, y):
sss = StratifiedShuffleSplit(n_splits=self.n_folds, test_size=self.test_ratio, random_state=self.random_state)
for i, (train, test) in enumerate(
sss.split(X=np.zeros(len(y)), y=y)
): # np.zeros used as a placeholder for X
yield i, train, test
@property
def name(self):
return self.__repr__()
def __repr__(self):
return f"<{self.__class__.__name__} with {self.n_folds} folds and using test_ratio {self.test_ratio}>"
class KnownIndicesSampler(Sampler):
"""
A sampler which already knows the indices for splitting
"""
def __init__(self, known_train, known_test):
known_train_test = []
for indices in zip(known_train, known_test):
known_train_test.append(indices)
self.known_indices = known_train_test
def _gen_samples(self, y):
for i, indices in enumerate(self.known_indices):
train = indices[0]
test = indices[1]
yield i, train, test
@property
def name(self):
return self.__repr__()
def __repr__(self):
return f"<{self.__class__.__name__} with {len(self.known_indices)} folds>"
# fixme: check if len(self.known_indices) makes sense
# -------------------------------------------------------------------
# Inductive Conformal Predictor
# -------------------------------------------------------------------
class InductiveConformalPredictor(IcpClassifier):
"""
Inductive Conformal Prediction Classifier
This is a subclass of the IcpClassifier from nonconformist
https://github.com/donlnz/nonconformist/blob/master/nonconformist/icp.py
The subclass allows to further extend the class to the needs of this project
Parameters
----------
# Note: some of the parameters descriptions are copied from nonconformist IcpClassifier
condition: condition for calculating p-values. Default condition is mondrian (calibration with 1 list
of nc scores per class). Note that default condition in nonconformist is 'lambda x: 0'
(only one list for both/multiple classes (?)).
For mondrian condition, see: https://pubs.acs.org/doi/10.1021/acs.jcim.7b00159
nc_function : BaseScorer
Nonconformity scorer object used to calculate nonconformity of
calibration examples and test patterns. Should implement ``fit(x, y)``
and ``calc_nc(x, y)``.
Attributes
----------
# Note: some of the attributes descriptions are copied from nonconformist IcpClassifier
condition: condition for calculating p-values. Note that if we want to use 'mondrian' condition,
we can either input condition='mondrian' or condition=(lambda instance: instance[1]).
Then, the condition.name will be saved, which is useful for serialisation
cal_x : numpy array of shape [n_cal_examples, n_features]
Inputs of calibration set.
cal_y : numpy array of shape [n_cal_examples]
Outputs of calibration set.
nc_function : BaseScorer
Nonconformity scorer object used to calculate nonconformity scores.
classes : numpy array of shape [n_classes]
List of class labels, with indices corresponding to output columns
of IcpClassifier.predict()
Examples
--------
todo
"""
def __init__(self, nc_function, condition=None, smoothing=False):
super().__init__(nc_function, condition=condition, smoothing=smoothing)
# fixme: this subclass was originally there to allow serialisation of the conformal predictors. However,
# this is not available yet
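# Illustrative construction, assuming the usual nonconformist wrappers (sketch only):
# from sklearn.ensemble import RandomForestClassifier
# from nonconformist.base import ClassifierAdapter
# from nonconformist.nc import ClassifierNc, MarginErrFunc
# icp = InductiveConformalPredictor(
#     ClassifierNc(ClassifierAdapter(RandomForestClassifier()), MarginErrFunc()),
#     condition=(lambda instance: instance[1]),  # Mondrian, i.e. class-conditional calibration
# )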
# -------------------------------
# Conformal Predictor Aggregators
# -------------------------------
class BaseConformalPredictorAggregator:
"""
Combines multiple InductiveConformalPredictor predictors into an aggregated model
The structure of this class is adapted from the nonconformist acp module:
https://github.com/donlnz/nonconformist/blob/master/nonconformist/acp.py
Parameters
----------
predictor : object
Prototype conformal predictor (i.e. InductiveConformalPredictor)
used for defining conformal predictors included in the aggregate model.
Attributes
----------
predictor : object
Prototype conformal predictor (i.e. InductiveConformalPredictor)
used for defining conformal predictors included in the aggregate model.
"""
def __init__(self, predictor):
self.predictor = predictor
def _fit_calibrate(self, **kwargs):
raise NotImplementedError("Implement in your subclass")
def fit_calibrate(self, **kwargs):
return self._fit_calibrate(**kwargs)
def _predict(self, **kwargs):
raise NotImplementedError("Implement in your subclass")
def predict(self, **kwargs):
return self._predict(**kwargs)
@property
def name(self):
raise NotImplementedError("Implement in your subclass")
class AggregatedConformalPredictor(BaseConformalPredictorAggregator):
"""
Generates an aggregated conformal predictor (acp) from multiple InductiveConformalPredictor predictors
The structure of this class is adapted from the nonconformist acp module:
https://github.com/donlnz/nonconformist/blob/master/nonconformist/acp.py
Parameters
----------
predictor : object
Prototype conformal predictor (i.e. InductiveConformalPredictor)
used for defining conformal predictors included in the aggregate model.
sampler : object
Sampler object used to generate training and calibration examples
for the underlying conformal predictors.
aggregation_func : callable
Function used to aggregate the predictions of the underlying
conformal predictors. Defaults to ``numpy.median``.
n_models : int
Number of models to aggregate.
Attributes
----------
predictor : object
Prototype conformal predictor (i.e. InductiveConformalPredictor)
used for defining conformal predictors included in the aggregate model.
sampler : object
Sampler object used to generate training and calibration examples
for the underlying conformal predictors.
agg_func : callable
Function used to aggregate the predictions of the underlying
conformal predictors. Defaults to ``numpy.median``.
n_models : int
Number of models to aggregate.
predictors_fitted : list
contains fitted ICP's
predictors_calibrated : list
contains calibrated ICP's
predictors_calibrated_update : list
contains fitted ICP's calibrated with the update dataset
Examples
--------
todo
"""
def __init__(self, predictor, sampler, aggregation_func=None):
super().__init__(predictor)
self.predictor = predictor
self.predictors_fitted = []
self.predictors_calibrated = []
self.sampler = sampler
self.n_models = sampler.n_folds
self.agg_func = aggregation_func
@staticmethod
def _f(predictor, X):
return predictor.predict(X, None)
@staticmethod
def _f_nc(predictor, X, y):
pred_proba = predictor.nc_function.model.model.predict_proba(X)
nc = predictor.nc_function.err_func.apply(pred_proba, y)
nc_0 = nc[y == 0]
nc_1 = nc[y == 1]
return nc_0, nc_1
def _fit_calibrate(
self, X_train=None, y_train=None,
):
self.predictors_fitted.clear()
self.predictors_calibrated.clear()
samples = self.sampler.gen_samples(labels=y_train)
for loop, p_train, cal in samples:
predictor = copy.deepcopy(self.predictor)
# Fit
predictor.train_index = p_train
predictor.fit(X_train[p_train, :], y_train[p_train])
self.predictors_fitted.append(predictor)
# Calibrate
predictor_calibration = copy.deepcopy(predictor)
predictor_calibration.calibrate(X_train[cal, :], y_train[cal])
self.predictors_calibrated.append(predictor_calibration)
def _predict(self, X_score=None):
predictions = np.dstack(
[self._f(p, X_score) for p in self.predictors_calibrated]
)
predictions = self.agg_func(predictions, axis=2)
return predictions
def predict_nc(self, X_score=None, y_score=None):
nc_0_predictions = [
self._f_nc(p, X_score, y_score)[0] for p in self.predictors_fitted
]
nc_1_predictions = [
self._f_nc(p, X_score, y_score)[1] for p in self.predictors_fitted
]
nc_0_predictions = np.concatenate(nc_0_predictions).ravel().tolist()
nc_1_predictions = np.concatenate(nc_1_predictions).ravel().tolist()
return nc_0_predictions, nc_1_predictions
@property
def name(self):
return self.__repr__()
def __repr__(self):
return f"<{self.__class__.__name__}, samples generated with {self.sampler}, {self.n_models} models built>"
class CPTox21AggregatedConformalPredictor(AggregatedConformalPredictor):
"""
    An aggregated conformal predictor class, specifically adapted for the cal_update part of the CPTox21 workflow
"""
def __init__(self, predictor, sampler, aggregation_func=None):
super().__init__(predictor, sampler, aggregation_func)
self.predictors_calibrated_update = []
self.predictors_calibrated_update2 = []
def _fit_calibrate(
self,
X_train=None,
y_train=None,
X_update=None,
y_update=None,
X_update2=None,
y_update2=None,
):
self.predictors_fitted.clear()
self.predictors_calibrated.clear()
self.predictors_calibrated_update.clear()
self.predictors_calibrated_update2.clear()
samples = self.sampler.gen_samples(labels=y_train)
for loop, p_train, cal in samples: # i.e. 20 loops
predictor = copy.deepcopy(self.predictor)
# Fit
predictor.train_index = p_train
predictor.fit(X_train[p_train, :], y_train[p_train])
self.predictors_fitted.append(predictor)
# Calibrate
predictor_calibration = copy.deepcopy(predictor)
predictor_calibration.calibrate(X_train[cal, :], y_train[cal])
self.predictors_calibrated.append(predictor_calibration)
# cal_update - calibrate with "newer" calibration set
predictor_calibration_update = copy.deepcopy(predictor)
predictor_calibration_update.calibrate(X_update, y_update)
self.predictors_calibrated_update.append(predictor_calibration_update)
predictor_calibration_update2 = copy.deepcopy(predictor)
predictor_calibration_update2.calibrate(X_update2, y_update2)
self.predictors_calibrated_update2.append(predictor_calibration_update2)
def predict_cal_update(self, X_score=None):
predictions_cal_update = np.dstack(
[self._f(p, X_score) for p in self.predictors_calibrated_update]
)
predictions_cal_update = self.agg_func(predictions_cal_update, axis=2)
return predictions_cal_update
def predict_cal_update2(self, X_score=None):
predictions_cal_update2 = np.dstack(
[self._f(p, X_score) for p in self.predictors_calibrated_update2]
)
predictions_cal_update2 = self.agg_func(predictions_cal_update2, axis=2)
return predictions_cal_update2
# --------------------------------
# Crossvalidation
# --------------------------------
class CrossValidator:
"""
This is a class to perform a crossvalidation using aggregated conformal predictors.
Note that this class only provides predictions within the crossvalidation, i.e.
of the test set split from X/y. If you want to predict external data within the
crossvalidation, use one of the provided subclasses or implement your own subclass
"""
def __init__(self, predictor, cv_splitter):
self.sampler = cv_splitter
self.predictor = predictor
self._evaluation_df_cv = None
self._cv_predictions = None
self.cv_predictors = None
self.num_actives = 0
self.num_inactives = 0
def cross_validate(
self, steps, endpoint=None, X=None, y=None, class_wise_evaluation=False,
):
num_actives = y.sum()
self.num_actives = num_actives
self.num_inactives = len(y) - num_actives
cv_predictions = []
cv_y_test = []
cv_predictors = []
cv_evaluations = self._create_empty_evaluations_dict()
samples = self.sampler.gen_samples(labels=y)
for fold, train, test in samples:
cv_y_test.append(y[test])
predictor = copy.deepcopy(self.predictor)
# Fit ACP
predictor.fit_calibrate(X_train=X[train], y_train=y[train])
cv_predictors.append(predictor)
cv_prediction = predictor.predict(X_score=X[test])
cv_predictions.append(cv_prediction)
cv_evaluations = self._evaluate(
cv_prediction,
y[test],
cv_evaluations,
endpoint,
fold=fold,
steps=steps,
class_wise=class_wise_evaluation,
)
self._evaluation_df_cv = pd.DataFrame(cv_evaluations)
self._cv_predictions = [cv_predictions, cv_y_test]
self.cv_predictors = cv_predictors
return pd.DataFrame(cv_evaluations)
@staticmethod
def _create_empty_evaluations_dict():
evaluation_measures = [
"validity",
"validity_0",
"validity_1",
"error_rate",
"error_rate_0",
"error_rate_1",
"efficiency",
"efficiency_0",
"efficiency_1",
"accuracy",
"accuracy_0",
"accuracy_1",
]
empty_evaluations_dict = {}
for measure in evaluation_measures:
empty_evaluations_dict[measure] = []
empty_evaluations_dict["significance_level"] = []
empty_evaluations_dict["fold"] = []
return empty_evaluations_dict
@staticmethod
def _evaluate(
prediction, y_true, evaluations, endpoint, fold, steps, class_wise=True
):
# fixme later 1: currently class-wise evaluation measures are calculated anyways but only saved
# if class_wise is True. Library might be changed, so that they are only calculated if necessary
# fixme later 2: validity and error_rate could be calculated using the same method, no need to do this twice
evaluator = Evaluator(prediction, y_true, endpoint)
sl = [i / float(steps) for i in range(steps)] + [1]
validities_list = ["validity", "validity_0", "validity_1"]
error_rates_list = ["error_rate", "error_rate_0", "error_rate_1"]
efficiencies_list = ["efficiency", "efficiency_0", "efficiency_1"]
accuracies_list = ["accuracy", "accuracy_0", "accuracy_1"]
validities = [
evaluator.calculate_validity(i / float(steps)) for i in range(steps)
] + [evaluator.calculate_validity(1)]
for validity in validities_list:
evaluations[validity].extend([val[validity] for val in validities])
error_rates = [
evaluator.calculate_error_rate(i / float(steps)) for i in range(steps)
] + [evaluator.calculate_error_rate(1)]
for error_rate in error_rates_list:
evaluations[error_rate].extend([err[error_rate] for err in error_rates])
efficiencies = [
evaluator.calculate_efficiency(i / float(steps)) for i in range(steps)
] + [evaluator.calculate_efficiency(1)]
for efficiency in efficiencies_list:
evaluations[efficiency].extend([eff[efficiency] for eff in efficiencies])
accuracies = [
evaluator.calculate_accuracy(i / float(steps)) for i in range(steps)
] + [evaluator.calculate_accuracy(1)]
for accuracy in accuracies_list:
evaluations[accuracy].extend([acc[accuracy] for acc in accuracies])
evaluations["significance_level"].extend(sl)
evaluations["fold"].extend([fold] * (steps + 1))
return evaluations
@property
def averaged_evaluation_df_cv(self):
return self._average_evaluation_df(
self._evaluation_df_cv, self.num_actives, self.num_inactives
)
@staticmethod
def _average_evaluation_df(evaluation_df, num_actives, num_inactives):
evaluation_df_grouped = evaluation_df.groupby(
by="significance_level"
).aggregate([np.mean, np.std])
evaluation_df_grouped.drop(["fold"], axis=1, inplace=True)
evaluation_df_grouped.columns = [
" ".join((a, b)) for a, b in evaluation_df_grouped.columns
]
evaluation_df_grouped.columns = evaluation_df_grouped.columns.get_level_values(
0
)
evaluation_df_grouped["significance_level"] = evaluation_df_grouped.index
evaluation_df_grouped["num_actives"] = num_actives
evaluation_df_grouped["num_inactives"] = num_inactives
return evaluation_df_grouped
@property
def cv_predictions_df(self):
return self._format_predictions_df(self._cv_predictions, self._cv_names)
@staticmethod
def _format_predictions_df(predictions, names):
# print("names", type(names), names)
pred_dfs = []
for i, pred in enumerate(predictions):
pred_df = | pd.DataFrame(data=predictions[0][i]) | pandas.DataFrame |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import gc
from utils import *
# In[3]:
train_active = pd.read_csv("../input/train_active.csv")
test_active = pd.read_csv("../input/test_active.csv")
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=['date_from', 'date_to'])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=['date_from', 'date_to'])
# In[7]:
all_periods = pd.concat([
train_periods,
test_periods
])
del train_periods
del test_periods
gc.collect()
# all_periods['days_up'] = (all_periods['date_to'] - all_periods['date_from']).dt.days
# In[12]:
all_periods['days_up'] = all_periods['date_to'].subtract(all_periods['date_from']).dt.days
# In[13]:
all_periods.head()
# In[14]:
all_periods = all_periods.groupby("item_id").days_up.mean().reset_index()
# In[15]:
all_periods.head()
# In[ ]:
active = | pd.concat([train_active, test_active]) | pandas.concat |
import pandas as pd
from util import normalize_dates, conversion, normalize_numeric, normalize_text
from errors import ApportionSeriesCombinationError
import dateutil.relativedelta
from traffic import get_data, addIds
import json
def hasData(df, col):
if df[col].sum() > 0:
return True
else:
return False
def hasNotNull(df, col):
for x in df[col]:
if not pd.isnull(x):
return True
return False
def process_apportionment(test=False, sql=False, companies=False):
if sql:
df = get_data(False, True, "apportionment.sql")
elif test:
print('no tests for apportionment data!')
else:
print('reading local apportionment csv...')
df = pd.read_csv("./raw_data/apportionment.csv")
df = normalize_dates(df, ['Date'])
df = normalize_text(df, ['Corporate Entity'])
# enbridge processing
df = df.drop(df[(df['Corporate Entity'] == 'Enbridge Pipelines Inc.') & (df['Key Point'] != 'system')].index)
df = df.drop(df[(df['Corporate Entity'] == 'Enbridge Pipelines Inc.') & (df['Date'].dt.year < 2016)].index)
# cochin processing
df = df.drop(df[(df['Corporate Entity'] == 'PKM Cochin ULC') & (df['Key Point'] != 'Ft. Saskatchewan')].index)
df = df[~df['Pipeline Name'].isin(["Southern Lights Pipeline",
"Westpur Pipeline",
"Trans-Northern"])].reset_index(drop=True)
df['Key Point'] = df['Key Point'].replace("All", "system")
df = addIds(df)
del df['Pipeline Name']
df = df.rename(columns={x: x.split("(")[0].strip() for x in df.columns})
numCols = ['Available Capacity', 'Original Nominations', 'Accepted Nominations', 'Apportionment Percentage']
df = normalize_numeric(df, numCols, 2)
df = conversion(df, "oil", numCols[:-1], 2, False)
df['Apportionment Percentage'] = df['Apportionment Percentage'].round(2)
company_files = ['NOVA Gas Transmission Ltd.',
'Westcoast Energy Inc.',
'TransCanada PipeLines Limited',
'Alliance Pipeline Ltd.',
'Trans Quebec and Maritimes Pipeline Inc.',
'Maritimes & Northeast Pipeline Management Ltd.',
'Many Islands Pipe Lines (Canada) Limited',
'Emera Brunswick Pipeline Company Ltd.',
'Foothills Pipe Lines Ltd.',
'Enbridge Pipelines Inc.',
'TransCanada Keystone Pipeline GP Ltd.',
'Trans Mountain Pipeline ULC',
'PKM Cochin ULC',
'Trans-Northern Pipelines Inc.',
'Enbridge Pipelines (NW) Inc.',
'Enbridge Southern Lights GP Inc.',
'Kingston Midstream Westspur Limited',
'Vector Pipeline Limited Partnership',
'Many Islands Pipe Lines (Canada) Limited',
'Plains Midstream Canada ULC',
'Enbridge Bakken Pipeline Company Inc.',
'Express Pipeline Ltd.',
'Genesis Pipeline Canada Ltd.',
'Montreal Pipe Line Limited',
'Aurora Pipeline Company Ltd',
'Kingston Midstream Westspur Limited',
'Enbridge Southern Lights GP Inc.',
'Emera Brunswick Pipeline Company Ltd.']
# for company in ['Enbridge Pipelines (NW) Inc.']:
for company in company_files:
thisCompanyData = {}
folder_name = company.replace(' ', '').replace('.', '')
df_c = df[df['Corporate Entity'] == company].copy().reset_index(drop=True)
if company == "Enbridge Pipelines Inc.":
df_c = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Load in ground-truth simulations together with fits from lavaan, HDDMnn and pyDDM. Visualize the results.
Created on Fri Mar 11 10:35:42 2022
@author: urai
"""
import pandas as pd
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
mypath = '/Users/urai/Documents/code/ddm_mediation'
#%%
def corrfunc(x, y, **kws):
# compute spearmans correlation
r, pval = sp.stats.spearmanr(x, y, nan_policy='omit')
print('%s, %s, %.2f, %.3f'%(x.name, y.name, r, pval))
if 'ax' in kws.keys():
ax = kws['ax']
else:
ax = plt.gca()
# if this correlates, draw a regression line across groups
if pval < 0.0001:
sns.regplot(x, y, truncate=True, color='gray',
scatter=False, ci=None, robust=True, ax=ax)
# now plot the datapoints
sns.regplot(x=x, y=y, fit_reg=False, truncate=True, **kws)
plt.axis('tight')
# annotate with the correlation coefficient + n-2 degrees of freedom
txt = r"$\rho$({}) = {:.3f}".format(len(x)-2, r) + "\n" + "p = {:.4f}".format(pval)
if pval < 0.0001:
txt = r"$\rho$({}) = {:.3f}".format(len(x)-2, r) + "\n" + "p < 0.0001"
ax.annotate(txt, xy=(.7, .1), xycoords='axes fraction', fontsize='small')
# indicate identity line?
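# Typical use (illustrative): map corrfunc onto the lower triangle of a seaborn PairGrid,
# e.g. g = sns.PairGrid(df, vars=['lav_a', 'lav_b'])  # hypothetical column names
#      g.map_lower(corrfunc)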
#%%
for eff_x in ['v', 'z', 'no']:
for eff_m in ['v', 'z', 'no']:
## load ground-truth data
sim_df = pd.read_csv('%s/data/param_df_X%s_M%s.csv'%(mypath, eff_x, eff_m))
sim_df.columns = sim_df.columns.map(lambda x: 'sim_' + str(x))
sim_df = sim_df.rename(columns={'sim_subj_idx':'subj_idx'})
## load lavaan-fitted data
lavaan_df = pd.read_csv('%s/data/fit_lavaan_X%s_M%s.csv'%(mypath, eff_x, eff_m))
lavaan_df = lavaan_df.pivot_table(values='est', index='subj_idx', columns='label').reset_index()
lavaan_df.columns = lavaan_df.columns.map(lambda x: 'lav_' + str(x))
lavaan_df = lavaan_df.rename(columns={'lav_subj_idx':'subj_idx'})
# merge
df = | pd.merge(lavaan_df, sim_df, on='subj_idx') | pandas.merge |
#!/usr/bin/env python3
import os
import warnings
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances
def read_explanations(path):
header = []
uid = None
df = pd.read_csv(path, sep='\t', dtype=str)
for name in df.columns:
if name.startswith('[SKIP]'):
if 'UID' in name and not uid:
uid = name
else:
header.append(name)
if not uid or len(df) == 0:
warnings.warn('Possibly misformatted file: ' + path)
return []
return df.apply(lambda r: (r[uid], ' '.join(str(s) for s in list(r[header]) if not pd.isna(s))), 1).tolist()
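# Each returned element is a (uid, text) tuple, roughly like
# ('1980-29de-c31b', 'a tree is a kind of plant ...'); the values shown are illustrative only.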
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--tables', type=str, required=True)
parser.add_argument('--questions', type=argparse.FileType('r', encoding='UTF-8'), required=True)
parser.add_argument('--output', type=str, required=True)
parser.add_argument('-n', '--nearest', type=int, default=5000)
parser.add_argument('--mcq-choices', type=str, choices=['none', 'correct', 'all'], default="all")
args = parser.parse_args()
explanations = []
for path, _, files in os.walk(args.tables):
for file in files:
explanations += read_explanations(os.path.join(path, file))
df_q = pd.read_csv(args.questions, sep='\t', dtype=str)
df_e = | pd.DataFrame(explanations, columns=('uid', 'text')) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta, datetime
from random import randint
from decimal import Decimal
import re
import time
from odps.df.backends.tests.core import TestBase, to_str, tn, pandas_case
from odps.compat import unittest, irange as xrange, OrderedDict
from odps.errors import ODPSError
from odps.df.types import validate_data_type, DynamicSchema
from odps.df.expr.expressions import *
from odps.df.backends.pd.engine import PandasEngine
from odps.df.backends.odpssql.engine import ODPSSQLEngine
from odps.df.backends.odpssql.types import df_schema_to_odps_schema
from odps.df.backends.context import context
from odps.df.backends.errors import CompileError
from odps.df import output_types, output_names, output, day, millisecond, agg, make_list, make_dict
TEMP_FILE_RESOURCE = tn('pyodps_tmp_file_resource')
TEMP_TABLE = tn('pyodps_temp_table')
TEMP_TABLE_RESOURCE = tn('pyodps_temp_table_resource')
@pandas_case
class Test(TestBase):
def setup(self):
datatypes = lambda *types: [validate_data_type(t) for t in types]
schema = Schema.from_lists(['name', 'id', 'fid', 'isMale', 'scale', 'birth'],
datatypes('string', 'int64', 'float64', 'boolean', 'decimal', 'datetime'))
self.schema = df_schema_to_odps_schema(schema)
import pandas as pd
self.df = | pd.DataFrame(None, columns=schema.names) | pandas.DataFrame |
"""
Functions for comparing and visualizing model performance. Most of these functions rely on ATOM's model tracker and
datastore services, which are not part of the standard AMPL installation, but a few functions will work on collections of
models saved as local files.
"""
import os
import sys
import pdb
import pandas as pd
import numpy as np
import matplotlib
import logging
import json
import shutil
import tarfile
import tempfile
from collections import OrderedDict
from atomsci.ddm.utils import datastore_functions as dsf
from atomsci.ddm.pipeline import model_tracker as trkr
import atomsci.ddm.pipeline.model_pipeline as mp
import atomsci.ddm.pipeline.parameter_parser as parse
import atomsci.ddm.pipeline.model_wrapper as mw
import atomsci.ddm.pipeline.featurization as feat
from tensorflow.python.keras.utils.layer_utils import count_params
logger = logging.getLogger('ATOM')
mlmt_supported = True
try:
from atomsci.clients import MLMTClient
except (ModuleNotFoundError, ImportError):
logger.debug("Model tracker client not supported in your environment; can look at models in filesystem only.")
mlmt_supported = False
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('axes', labelsize=12)
logging.basicConfig(format='%(asctime)-15s %(message)s')
nan = np.float32('nan')
#------------------------------------------------------------------------------------------------------------------
def del_ignored_params(dictionary, ignored_params):
"""
Deletes ignored parameters from the dictionary if they exist
Args:
dictionary (dict): A dictionary with parameters
ignored_parameters (list(str)): A list of keys potentially in the dictionary
Returns:
None
"""
for ip in ignored_params:
if ip in dictionary:
del dictionary[ip]
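# Example (illustrative): del_ignored_params(params, ['batch_size', 'optimizer_type'])
# drops those keys if present and leaves the rest of `params` untouched.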
#------------------------------------------------------------------------------------------------------------------
def get_collection_datasets(collection_name):
"""
Returns a list of unique training datasets used for all models in a given collection.
Args:
collection_name (str): Name of model tracker collection to search for models.
Returns:
list: List of model training (dataset_key, bucket) tuples.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
dataset_set = set()
mlmt_client = dsf.initialize_model_tracker()
dset_dicts = mlmt_client.model.query_datasets(collection_name=collection_name, metrics_type='training').result()
# Convert to a list of (dataset_key, bucket) tuples
for dset_dict in dset_dicts:
dataset_set.add((dset_dict['dataset_key'], dset_dict['bucket']))
return sorted(dataset_set)
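# Example return value (illustrative only):
# [('/projects/.../my_dataset.csv', 'my_bucket'), ...]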
#------------------------------------------------------------------------------------------------------------------
def extract_collection_perf_metrics(collection_name, output_dir, pred_type='regression'):
"""
Obtain list of training datasets with models in the given collection. Get performance metrics for
models on each dataset and save them as CSV files in the given output directory.
Args:
collection_name (str): Name of model tracker collection to search for models.
output_dir (str): Directory where tables of performance metrics will be written.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
None
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
datasets = get_collection_datasets(collection_name)
os.makedirs(output_dir, exist_ok=True)
for dset_key, bucket in datasets:
dset_perf_df = get_training_perf_table(dset_key, bucket, collection_name, pred_type=pred_type)
dset_perf_file = '%s/%s_%s_model_perf_metrics.csv' % (output_dir, os.path.basename(dset_key).replace('.csv', ''), collection_name)
dset_perf_df.to_csv(dset_perf_file, index=False)
print('Wrote file %s' % dset_perf_file)
#------------------------------------------------------------------------------------------------------------------
def get_training_perf_table(dataset_key, bucket, collection_name, pred_type='regression', other_filters = {}):
"""
Load performance metrics from model tracker for all models saved in the model tracker DB under
a given collection that were trained against a particular dataset. Identify training parameters
that vary between models, and generate plots of performance vs particular combinations of
parameters.
Args:
dataset_key (str): Training dataset key.
bucket (str): Training dataset bucket.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
other_filters (dict): Other filter criteria to use in querying models.
Returns:
pd.DataFrame: Table of models and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
query_params['match_metadata'].update(other_filters)
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
model_type_list = []
max_epochs_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
splitter_list = []
rf_estimators_list = []
rf_max_features_list = []
rf_max_depth_list = []
xgb_learning_rate_list = []
xgb_gamma_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
score_dict = {}
for subset in subsets:
score_dict[subset] = []
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key = metadata_dict['training_dataset']['dataset_key']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'RF':
rf_params = metadata_dict['rf_specific']
rf_estimators_list.append(rf_params['rf_estimators'])
rf_max_features_list.append(rf_params['rf_max_features'])
rf_max_depth_list.append(rf_params['rf_max_depth'])
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(nan)
xgb_gamma_list.append(nan)
if model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
rf_estimators_list.append(nan)
rf_max_features_list.append(nan)
rf_max_depth_list.append(nan)
max_epochs_list.append(nan)
best_epoch_list.append(nan)
learning_rate_list.append(nan)
layer_sizes_list.append(nan)
dropouts_list.append(nan)
xgb_learning_rate_list.append(xgb_params["xgb_learning_rate"])
xgb_gamma_list.append(xgb_params["xgb_gamma"])
for subset in subsets:
score_dict[subset].append(subset_metrics[subset][metric_type])
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_type=model_type_list,
dataset_key=dataset_key,
featurizer=featurizer_list,
splitter=splitter_list,
max_epochs=max_epochs_list,
best_epoch=best_epoch_list,
learning_rate=learning_rate_list,
layer_sizes=layer_sizes_list,
dropouts=dropouts_list,
rf_estimators=rf_estimators_list,
rf_max_features=rf_max_features_list,
rf_max_depth=rf_max_depth_list,
xgb_learning_rate = xgb_learning_rate_list,
xgb_gamma = xgb_gamma_list))
for subset in subsets:
metric_col = '%s_%s' % (metric_type, subset)
perf_df[metric_col] = score_dict[subset]
sort_metric = '%s_valid' % metric_type
perf_df = perf_df.sort_values(sort_metric, ascending=False)
return perf_df
# -----------------------------------------------------------------------------------------------------------------
def extract_model_and_feature_parameters(metadata_dict):
"""
Given a model metadata dictionary, extract model and feature parameters. Looks for parameter names
that end in *_specific, e.g. nn_specific, auto_featurizer_specific.
Args:
metadata_dict (dict): Dictionary containing non-flattened metadata for an AMPL model
Returns:
dict: Dictionary of featurizer and model parameters. Contains the keys
['model_uuid', 'max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'dropouts',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate',
'feat_parameters_dict', 'model_parameters_dict']; fields that do not apply to the model type are set to nan.
"""
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
required = ['max_epochs', 'best_epoch', 'learning_rate', 'layer_sizes', 'dropouts',
'rf_estimators', 'rf_max_features', 'rf_max_depth', 'xgb_gamma', 'xgb_learning_rate']
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
if model_type == 'NN':
nn_params = metadata_dict['nn_specific']
model_info['max_epochs'] = nn_params['max_epochs']
model_info['best_epoch'] = nn_params['best_epoch']
model_info['learning_rate'] = nn_params['learning_rate']
model_info['layer_sizes'] = ','.join(['%d' % s for s in nn_params['layer_sizes']])
model_info['dropouts'] = ','.join(['%.2f' % d for d in nn_params['dropouts']])
elif model_type == 'RF':
rf_params = metadata_dict['rf_specific']
model_info['rf_estimators'] = rf_params['rf_estimators']
model_info['rf_max_features'] = rf_params['rf_max_features']
model_info['rf_max_depth'] = rf_params['rf_max_depth']
elif model_type == 'xgboost':
xgb_params = metadata_dict['xgb_specific']
model_info['xgb_gamma'] = xgb_params['xgb_gamma']
model_info['xgb_learning_rate'] = xgb_params['xgb_learning_rate']
for r in required:
if r not in model_info:
# all fields must be filled in
model_info[r] = nan
# the new way of extracting model parameters is to simply save them in json
if 'nn_specific' in metadata_dict:
model_metadata = metadata_dict['nn_specific']
# include learning rate, max_epochs, and best_epoch for convenience
model_info['max_epochs'] = model_metadata['max_epochs']
model_info['best_epoch'] = model_metadata['best_epoch']
learning_rate_col = [c for c in model_metadata.keys() if c.endswith('learning_rate')]
if len(learning_rate_col) == 1:
model_info['learning_rate'] = model_metadata[learning_rate_col[0]]
# delete several parameters that aren't normally saved
ignored_params = ['batch_size','bias_init_consts','optimizer_type',
'weight_decay_penalty','weight_decay_penalty_type','weight_init_stddevs']
del_ignored_params(model_metadata, ignored_params)
elif 'rf_specific' in metadata_dict:
model_metadata = metadata_dict['rf_specific']
elif 'xgb_specific' in metadata_dict:
model_metadata = metadata_dict['xgb_specific']
# delete several parameters that aren't normally saved
ignored_params = ['xgb_colsample_bytree','xgb_max_depth',
'xgb_min_child_weight','xgb_n_estimators','xgb_subsample']
del_ignored_params(model_metadata, ignored_params)
else:
# no model parameters found
model_metadata = {}
model_info['model_parameters_dict'] = json.dumps(model_metadata)
if 'ecfp_specific' in metadata_dict:
feat_metadata = metadata_dict['ecfp_specific']
elif 'auto_featurizer_specific' in metadata_dict:
feat_metadata = metadata_dict['auto_featurizer_specific']
elif 'autoencoder_specific' in metadata_dict:
feat_metadata = metadata_dict['autoencoder_specific']
else:
# no model parameters found
feat_metadata = {}
model_info['feat_parameters_dict'] = json.dumps(feat_metadata)
return model_info
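# Usage sketch for extract_model_and_feature_parameters (the metadata path below is hypothetical;
# any model_metadata.json written by an AMPL training run should work):
#
#   with open('/path/to/model_metadata.json') as f:
#       meta = json.load(f)
#   params = extract_model_and_feature_parameters(meta)
#   # params['model_parameters_dict'] and params['feat_parameters_dict'] are JSON strings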
# ------------------------------------------------------------------------------------------------------------------
def get_best_perf_table(metric_type, col_name=None, result_dir=None, model_uuid=None, metadata_dict=None, PK_pipe=False):
"""
Extract parameters and training run performance metrics for a single model. The model may be
specified either by a metadata dictionary, a model_uuid or a result directory; in the model_uuid case, the function
queries the model tracker DB for the model metadata. For models saved in the filesystem, the performance
data can be queried from the original result directory, but not from a saved tarball.
Args:
metric_type (str): Performance metric to include in result dictionary.
col_name (str): Collection name containing model, if model is specified by model_uuid.
result_dir (str): result directory of the model, if Model tracker is not supported and metadata_dict not provided.
model_uuid (str): UUID of model to query, if metadata_dict is not provided.
metadata_dict (dict): Full metadata dictionary for a model, including training metrics and
dataset metadata.
PK_pipe (bool): If True, include some additional parameters in the result dictionary specific to PK models.
Returns:
model_info (dict): Dictionary of parameter or metric name - value pairs.
Todo:
Add support for models saved as local tarball files.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
elif mlmt_supported and col_name:
mlmt_client = dsf.initialize_model_tracker()
if metadata_dict is None:
if model_uuid is None:
print("Have to specify either metadata_dict or model_uuid")
return
query_params = {
"match_metadata": {
"model_uuid": model_uuid,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params
).result())
if len(metadata_list) == 0:
print("No matching models returned")
return None
metadata_dict = metadata_list[0]
elif result_dir:
model_dir = ""
for dirpath, dirnames, filenames in os.walk(result_dir):
if model_uuid in dirnames:
model_dir = os.path.join(dirpath, model_uuid)
break
if model_dir:
with open(os.path.join(model_dir, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
else:
print(f"model_uuid ({model_uuid}) not exist in {result_dir}.")
return None
model_info = {}
model_info['model_uuid'] = metadata_dict['model_uuid']
model_info['collection_name'] = col_name
# Get model metrics for this model
metrics_dicts = [d for d in metadata_dict['training_metrics'] if d['label'] == 'best']
if len(metrics_dicts) != 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
return None
model_params = metadata_dict['model_parameters']
model_info['model_type'] = model_params['model_type']
model_info['featurizer'] = model_params['featurizer']
split_params = metadata_dict['splitting_parameters']
model_info['splitter'] = split_params['splitter']
if 'split_uuid' in split_params:
model_info['split_uuid'] = split_params['split_uuid']
model_info['dataset_key'] = metadata_dict['training_dataset']['dataset_key']
model_info['bucket'] = metadata_dict['training_dataset']['bucket']
dset_meta = metadata_dict['training_dataset']['dataset_metadata']
if PK_pipe:
model_info['assay_name'] = dset_meta.get('assay_category', 'NA')
model_info['response_col'] = dset_meta.get('response_cols', dset_meta.get('response_col', 'NA'))
try:
model_info['descriptor_type'] = metadata_dict['descriptor_specific']['descriptor_type']
except KeyError:
model_info['descriptor_type'] = 'NA'
try:
model_info['num_samples'] = dset_meta['num_row']
except:
# KSM: Commented out because original dataset may no longer be accessible.
#tmp_df = dsf.retrieve_dataset_by_datasetkey(model_info['dataset_key'], model_info['bucket'])
#model_info['num_samples'] = tmp_df.shape[0]
model_info['num_samples'] = nan
# add model and feature params
# model_uuid appears in model_feature_params and will overwrite the one in model_info
# it's the same uuid, so it should be ok
model_feature_params = extract_model_and_feature_parameters(metadata_dict)
model_info.update(model_feature_params)
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
metric_col = '%s_%s' % (metric_type, subset)
model_info[metric_col] = metrics_dict['prediction_results'][metric_type]
if (model_params['prediction_type'] == 'regression') and (metric_type != 'rms_score'):
metric_col = 'rms_score_%s' % subset
model_info[metric_col] = metrics_dict['prediction_results']['rms_score']
return model_info
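# Usage sketch for get_best_perf_table (collection name, result directory and UUID are hypothetical);
# the model can be located either through the model tracker or a filesystem result directory:
#
#   info = get_best_perf_table('r2_score', col_name='my_collection', model_uuid='some-model-uuid')
#   info = get_best_perf_table('r2_score', result_dir='/path/to/results', model_uuid='some-model-uuid')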
# ---------------------------------------------------------------------------------------------------------
def get_best_models_info(col_names=None, bucket='public', pred_type="regression", result_dir=None, PK_pipeline=False,
output_dir='/usr/local/data',
shortlist_key=None, input_dset_keys=None, save_results=False, subset='valid',
metric_type=None, selection_type='max', other_filters={}):
"""
Tabulate parameters and performance metrics for the best models, according to a given metric, trained against
each specified dataset.
Args:
col_names (list of str): List of model tracker collections to search.
bucket (str): Datastore bucket for training datasets.
pred_type (str): Type of models (regression or classification).
result_dir (list of str): Result directories of the models, if model tracker is not supported.
PK_pipeline (bool): Are we being called from PK pipeline?
output_dir (str): Directory to write output table to.
shortlist_key (str): Datastore key for table of datasets to query models for.
input_dset_keys (str or list of str): List of datastore keys for datasets to query models for. Either shortlist_key
or input_dset_keys must be specified, but not both.
save_results (bool): If True, write the table of results to a CSV file.
subset (str): Input dataset subset ('train', 'valid', or 'test') for which metrics are used to select best models.
metric_type (str): Type of performance metric (r2_score, roc_auc_score, etc.) to use to select best models.
selection_type (str): Score criterion ('max' or 'min') to use to select best models.
other_filters (dict): Additional selection criteria to include in model query.
Returns:
top_models_df (DataFrame): Table of parameters and metrics for best models for each dataset.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' needs to be provided.")
return None
top_models_info = []
sort_order = {'max': -1, 'min': 1}
sort_ascending = {'max': False, 'min': True}
if metric_type is None:
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
if other_filters is None:
other_filters = {}
# define dset_keys
if input_dset_keys is not None and shortlist_key is not None:
raise ValueError("You can specify either shortlist_key or input_dset_keys but not both.")
elif input_dset_keys is not None and shortlist_key is None:
if type(input_dset_keys) == str:
dset_keys = [input_dset_keys]
else:
dset_keys = input_dset_keys
elif input_dset_keys is None and shortlist_key is None:
raise ValueError('Must specify either input_dset_keys or shortlist_key')
else:
dset_keys = dsf.retrieve_dataset_by_datasetkey(shortlist_key, bucket)
if dset_keys is None:
# define dset_keys, col_names and buckets from shortlist file
shortlist = pd.read_csv(shortlist_key)
if 'dataset_key' in shortlist.columns:
dset_keys = shortlist['dataset_key'].unique()
elif 'task_name' in shortlist.columns:
dset_keys = shortlist['task_name'].unique()
else:
dset_keys = shortlist.values
if 'collection' in shortlist.columns:
col_names = shortlist['collection'].unique()
if 'bucket' in shortlist.columns:
bucket = shortlist['bucket'].unique()
if mlmt_supported and col_names is not None:
mlmt_client = dsf.initialize_model_tracker()
if type(col_names) == str:
col_names = [col_names]
if type(bucket) == str:
bucket=[bucket]
# Get the best model over all collections for each dataset
for dset_key in dset_keys:
dset_key = dset_key.strip()
dset_model_info = []
for col_name in col_names:
for buck in bucket:
try:
query_params = {
"match_metadata": {
"training_dataset.dataset_key": dset_key,
"training_dataset.bucket": buck,
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
"subset": subset,
"$sort": [{"prediction_results.%s" % metric_type : sort_order[selection_type]}]
},
}
query_params['match_metadata'].update(other_filters)
try:
print('Querying collection %s for models trained on dataset %s, %s' % (col_name, buck, dset_key))
metadata_list = list(mlmt_client.model.query_model_metadata(
collection_name=col_name,
query_params=query_params,
limit=1
).result())
except Exception as e:
print("Error returned when querying the best model for dataset %s in collection %s" % (dset_key, col_name))
print(e)
continue
if len(metadata_list) == 0:
print("No models returned for dataset %s in collection %s" % (dset_key, col_name))
continue
print('Query returned %d models' % len(metadata_list))
model = metadata_list[0]
model_info = get_best_perf_table(metric_type, col_name, metadata_dict=model, PK_pipe=PK_pipeline)
if model_info is not None:
res_df = pd.DataFrame.from_records([model_info])
dset_model_info.append(res_df)
except Exception as e:
print(e)
continue
metric_col = '%s_%s' % (metric_type, subset)
if len(dset_model_info) > 0:
dset_model_df = pd.concat(dset_model_info, ignore_index=True).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(dset_model_df.head(1))
print('Adding data for bucket %s, dset_key %s' % (dset_model_df.bucket.values[0], dset_model_df.dataset_key.values[0]))
elif result_dir:
metric_col = '%s_%s' % (subset, metric_type)
for rd in result_dir:
temp_perf_df = get_filesystem_perf_results(result_dir = rd, pred_type = pred_type).sort_values(
by=metric_col, ascending=sort_ascending[selection_type])
top_models_info.append(temp_perf_df.head(1))
print(f"Adding data from '{rd}' ")
if len(top_models_info) == 0:
print("No metadata found")
return None
top_models_df = pd.concat(top_models_info, ignore_index=True)
if save_results:
os.makedirs(output_dir, exist_ok=True)
if shortlist_key is not None:
# Not including shortlist key right now because some are weirdly formed and have .csv in the middle
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata.csv'), index=False)
else:
for dset_key in dset_keys:
# TODO: This doesn't make sense; why output multiple copies of the same table?
shortened_key = dset_key[:-len('.csv')] if dset_key.endswith('.csv') else dset_key
top_models_df.to_csv(os.path.join(output_dir, 'best_models_metadata_%s.csv' % shortened_key), index=False)
return top_models_df
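# Usage sketch for get_best_models_info (collection, bucket and dataset keys are hypothetical):
#
#   top_df = get_best_models_info(col_names=['my_collection'], bucket='public', pred_type='regression',
#                                 input_dset_keys=['/datasets/my_assay.csv'], subset='valid',
#                                 metric_type='r2_score', selection_type='max')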
# TODO: This function looks like work in progress, should we delete it?
'''
#---------------------------------------------------------------------------------------------------------
def _get_best_grouped_models_info(collection='pilot_fixed', pred_type='regression', top_n=1, subset='test'):
"""
Get results for models in the given collection.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
res_dir = '/usr/local/data/%s_perf' % collection
plt_dir = '%s/Plots' % res_dir
os.makedirs(plt_dir, exist_ok=True)
res_files = os.listdir(res_dir)
suffix = '_%s_model_perf_metrics.csv' % collection
if pred_type == 'regression':
metric_type = 'r2_score'
else:
metric_type = 'roc_auc_score'
for res_file in res_files:
try:
if not res_file.endswith(suffix):
continue
res_path = os.path.join(res_dir, res_file)
res_df = pd.read_csv(res_path, index_col=False)
res_df['combo'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
dset_name = res_file.replace(suffix, '')
datasets.append(dset_name)
res_df['dataset'] = dset_name
print(dset_name)
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
res_df['model_type/feat'] = ['%s/%s' % (m,f) for m, f in zip(res_df.model_type.values, res_df.featurizer.values)]
res_df = res_df.sort_values('{0}_{1}'.format(metric_type, subset), ascending=False)
grouped_df = res_df.groupby('model_type/feat').apply(
lambda t: t.head(top_n)
).reset_index(drop=True)
top_grouped_models.append(grouped_df)
top_combo = res_df['model_type/feat'].values[0]
top_combo_dsets.append(top_combo + dset_name.lstrip('ATOM_GSK_dskey'))
top_score = res_df['{0}_{1}'.format(metric_type, subset)].values[0]
top_model_feat.append(top_combo)
top_scores.append(top_score)
num_samples.append(res_df['Dataset Size'][0])
'''
#------------------------------------------------------------------------------------------------------------------
def get_umap_nn_model_perf_table(dataset_key, bucket, collection_name, pred_type='regression'):
"""
Load performance metrics from model tracker for all NN models with the given prediction_type saved in
the model tracker DB under a given collection that were trained against a particular dataset. Show
parameter settings for UMAP transformer for models where they are available.
Args:
dataset_key (str): Dataset key for training dataset.
bucket (str): Dataset bucket for training dataset.
collection_name (str): Name of model tracker collection to search for models.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
query_params = {
"match_metadata": {
"training_dataset.bucket": bucket,
"training_dataset.dataset_key": dataset_key,
"model_parameters.model_type" : "NN",
"model_parameters.prediction_type" : pred_type
},
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
print("Finding models trained on %s dataset %s" % (bucket, dataset_key))
mlmt_client = dsf.initialize_model_tracker()
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
if metadata_list == []:
print("No matching models returned")
return
else:
print("Found %d matching models" % len(metadata_list))
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
featurizer_list = []
best_epoch_list = []
max_epochs_list = []
feature_transform_type_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
sort_metric = 'r2_score'
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
sort_metric = 'roc_auc_score'
metrics = ['roc_auc_score', 'prc_auc_score', 'matthews_cc', 'kappa', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
for metadata_dict in metadata_list:
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
if len(metrics_dicts) > 3:
raise Exception('Got more than one set of best epoch metrics for model %s' % model_uuid)
subset_metrics = {}
for metrics_dict in metrics_dicts:
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
model_type = model_params['model_type']
if model_type != 'NN':
continue
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
learning_rate=learning_rate_list,
dropouts=dropouts_list,
layer_sizes=layer_sizes_list,
featurizer=featurizer_list,
best_epoch=best_epoch_list,
max_epochs=max_epochs_list,
feature_transform_type=feature_transform_type_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list ))
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (metric, subset)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = '%s_valid' % sort_metric
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
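# Usage sketch for get_umap_nn_model_perf_table (dataset key, bucket and collection are hypothetical):
#
#   umap_perf_df = get_umap_nn_model_perf_table('/datasets/my_assay.csv', 'public',
#                                               'my_collection', pred_type='regression')
#   # Rows are sorted by the validation-set metric (r2_score or roc_auc_score), best first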
#------------------------------------------------------------------------------------------------------------------
def get_tarball_perf_table(model_tarball, pred_type='classification'):
"""
Retrieve model metadata and performance metrics for a model saved as a tarball (.tar.gz) file.
Args:
model_tarball (str): Path of model tarball file, named as model.tar.gz.
pred_type (str): Prediction type ('classification' or 'regression') of model.
Returns:
tuple (pd.DataFrame, dict): Table of performance metrics and a dictionary of model metadata.
"""
tarf_content = tarfile.open(model_tarball, "r")
metadata_file = tarf_content.getmember("./model_metadata.json")
ext_metadata = tarf_content.extractfile(metadata_file)
meta_json = json.load(ext_metadata)
ext_metadata.close()
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score',
'accuracy_score', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = [0,0]
for emet in meta_json["training_metrics"]:
label = emet["label"]
score_ix = 0 if label == "best" else 1
subset = emet["subset"]
for metric in metrics:
score_dict[subset][metric][score_ix] = emet["prediction_results"][metric]
perf_df = pd.DataFrame()
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
return perf_df, meta_json
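# Usage sketch for get_tarball_perf_table (the tarball path is hypothetical):
#
#   perf_df, meta_json = get_tarball_perf_table('/path/to/model.tar.gz', pred_type='regression')
#   # Row 0 holds the metrics labeled 'best'; row 1 holds metrics with any other label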
#------------------------------------------------------------------------------------------------------------------
def get_filesystem_perf_results(result_dir, pred_type='classification'):
"""
Retrieve metadata and performance metrics for models stored in the filesystem from a hyperparameter search run.
Args:
result_dir (str): Root directory for results from a hyperparameter search training run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics.
"""
ampl_version_list = []
model_uuid_list = []
model_type_list = []
featurizer_list = []
dataset_key_list = []
splitter_list = []
model_score_type_list = []
feature_transform_type_list = []
# model type specific lists
param_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['r2_score', 'rms_score', 'mae_score', 'num_compounds']
else:
metrics = ['roc_auc_score', 'prc_auc_score', 'precision', 'recall_score', 'num_compounds',
'accuracy_score', 'bal_accuracy', 'npv', 'matthews_cc', 'kappa', 'cross_entropy', 'confusion_matrix']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
score_dict['valid']['model_choice_score'] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
tar_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
# collect all tars for later
tar_list = tar_list + [os.path.join(dirpath, f) for f in filenames if f.endswith('.tar.gz')]
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
if meta_dict['model_parameters']['prediction_type']==pred_type:
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
# build dictionary of tarball names
tar_dict = {os.path.basename(tf):tf for tf in tar_list}
path_list = []
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
dataset_key = metadata_dict['training_dataset']['dataset_key']
dataset_name = mp.build_tarball_name(mp.build_dataset_name(dataset_key), model_uuid)
if dataset_name in tar_dict:
path_list.append(tar_dict[dataset_name])
else:
# unable to find saved tar file
path_list.append('')
# Get list of training run metrics for this model
if len(metrics_dicts) < 3:
print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
ampl_version = model_params['ampl_version']
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
model_score_type = model_params['model_choice_score_type']
model_score_type_list.append(model_score_type)
featurizer = model_params['featurizer']
# Mix ecfp, graphconv, moe, mordred, rdkit, etc. in one column: for descriptor featurizers, report the specific descriptor type for a more concise representation
if featurizer in ["computed_descriptors", "descriptors"]:
featurizer = metadata_dict["descriptor_specific"]["descriptor_type"]
featurizer_list.append(featurizer)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
dataset_key_list.append(metadata_dict['training_dataset']['dataset_key'])
feature_transform_type = metadata_dict['training_dataset']['feature_transform_type']
feature_transform_type_list.append(feature_transform_type)
param_list.append(extract_model_and_feature_parameters(metadata_dict))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
score_dict['valid']['model_choice_score'].append(subset_metrics['valid']['model_choice_score'])
param_df = pd.DataFrame(param_list)
perf_df = pd.DataFrame(dict(
model_uuid=model_uuid_list,
model_path = path_list,
ampl_version=ampl_version_list,
model_type=model_type_list,
dataset_key=dataset_key_list,
featurizer=featurizer_list,
splitter=splitter_list,
model_score_type=model_score_type_list,
feature_transform_type=feature_transform_type_list))
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
perf_df['model_choice_score'] = score_dict['valid']['model_choice_score']
for subset in subsets:
for metric in metrics:
metric_col = '%s_%s' % (subset, metric)
perf_df[metric_col] = score_dict[subset][metric]
sort_by = 'model_choice_score'
perf_df = perf_df.sort_values(sort_by, ascending=False)
return perf_df
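# Usage sketch for get_filesystem_perf_results (the result directory is hypothetical):
#
#   perf_df = get_filesystem_perf_results('/path/to/hyperparam_results', pred_type='regression')
#   # Rows are sorted by model_choice_score, so the best model appears first
#   best_uuid = perf_df.model_uuid.values[0]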
def get_filesystem_models(result_dir, pred_type):
"""
Identify all models under result_dir and create a performance table with a 'tarball_paths' column
containing the path to each model's tarball, where one could be found under result_dir.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
#best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
perf_df['dataset_names'] = perf_df['dataset_key'].apply(lambda f: os.path.splitext(os.path.basename(f))[0])
perf_df['tarball_names'] = perf_df.apply(lambda x: '%s_model_%s.tar.gz' % (x['dataset_names'], x['model_uuid']), axis=1)
tarball_names = set(perf_df['tarball_names'].values)
all_filenames = []
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if fn in tarball_names:
all_filenames.append((fn, os.path.join(dirpath, fn)))
found_files_df = pd.DataFrame({'tarball_names':[f[0] for f in all_filenames],
'tarball_paths':[f[1] for f in all_filenames]})
perf_df = perf_df.merge(found_files_df, on='tarball_names', how='outer')
return perf_df
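# Usage sketch for get_filesystem_models (the result directory is hypothetical):
#
#   models_df = get_filesystem_models('/path/to/hyperparam_results', pred_type='classification')
#   # 'tarball_paths' is missing (NaN) for models whose tarball was not found under result_dir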
#------------------------------------------------------------------------------------------------------------------
def copy_best_filesystem_models(result_dir, dest_dir, pred_type, force_update=False):
"""
Identify the best models for each dataset within a result directory tree (e.g. from a hyperparameter search).
Copy the associated model tarballs to a destination directory.
Args:
result_dir (str): Path to model training result directory.
dest_dir (str): Path of directory where model tarballs will be copied to.
pred_type (str): Prediction type ('classification' or 'regression') of models to copy.
force_update (bool): If true, overwrite tarball files that already exist in dest_dir.
Returns:
pd.DataFrame: Table of performance metrics for best models.
"""
perf_df = get_filesystem_perf_results(result_dir, pred_type)
if pred_type == 'regression':
metric = 'valid_r2_score'
else:
metric = 'valid_roc_auc_score'
best_df = perf_df.sort_values(by=metric, ascending=False).drop_duplicates(subset='dataset_key').copy()
dataset_names = [os.path.splitext(os.path.basename(f))[0] for f in best_df.dataset_key.values]
model_uuids = best_df.model_uuid.values
tarball_names = ['%s_model_%s.tar.gz' % (dset_name, model_uuid) for dset_name, model_uuid in zip(dataset_names, model_uuids)]
for dirpath, dirnames, filenames in os.walk(result_dir):
for fn in filenames:
if (fn in tarball_names) and (force_update or not os.path.exists(os.path.join(dest_dir, fn))):
shutil.copy2(os.path.join(dirpath, fn), dest_dir)
print('Copied %s' % fn)
return best_df
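# Usage sketch for copy_best_filesystem_models (both directory paths are hypothetical):
#
#   best_df = copy_best_filesystem_models('/path/to/hyperparam_results', '/path/to/best_models',
#                                         pred_type='regression', force_update=False)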
#------------------------------------------------------------------------------------------------------------------
def get_summary_perf_tables(collection_names=None, filter_dict={}, result_dir=None, prediction_type='regression', verbose=False):
"""
Load model parameters and performance metrics from model tracker for all models saved in the model tracker DB under
the given collection names (or result directory if Model tracker is not available) with the given prediction type.
Tabulate the parameters and metrics including:
dataset (assay name, target, parameter, key, bucket)
dataset size (train/valid/test/total)
number of training folds
model type (NN or RF)
featurizer
transformation type
metrics: r2_score, mae_score and rms_score for regression, or ROC AUC for classification
Args:
collection_names (list): Names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
result_dir (str or list): Directories to search for models; must be provided if the model tracker DB is not available.
prediction_type (str): Type of models (classification or regression) to query.
verbose (bool): If true, print status messages as collections are processed.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported and not result_dir:
print("Model tracker not supported in your environment; can examine models saved in filesystem only, 'result_dir' is needed.")
return None
collection_list = []
ampl_version_list=[]
model_uuid_list = []
time_built_list = []
model_type_list = []
dataset_key_list = []
bucket_list = []
param_list = []
featurizer_list = []
desc_type_list = []
transform_list = []
dset_size_list = []
splitter_list = []
split_strategy_list = []
split_uuid_list = []
umap_dim_list = []
umap_targ_wt_list = []
umap_neighbors_list = []
umap_min_dist_list = []
model_feat_param_list = []
if prediction_type == 'regression':
score_types = ['r2_score', 'mae_score', 'rms_score']
else:
# TODO: add more classification metrics later
score_types = ['roc_auc_score', 'prc_auc_score', 'accuracy_score', 'bal_accuracy', 'precision', 'recall_score', 'npv', 'matthews_cc', 'kappa']
subsets = ['train', 'valid', 'test']
score_dict = {}
ncmpd_dict = {}
for subset in subsets:
score_dict[subset] = {}
for score_type in score_types:
score_dict[subset][score_type] = []
ncmpd_dict[subset] = []
metadata_list_dict = {}
if mlmt_supported and collection_names:
mlmt_client = dsf.initialize_model_tracker()
filter_dict['model_parameters.prediction_type'] = prediction_type
for collection_name in collection_names:
print("Finding models in collection %s" % collection_name)
query_params = {
"match_metadata": filter_dict,
"match_metrics": {
"metrics_type": "training", # match only training metrics
"label": "best",
},
}
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
).result()
metadata_list_dict[collection_name] = metadata_list
elif result_dir:
if isinstance(result_dir, str):
result_dir = [result_dir]
for rd in result_dir:
if rd not in metadata_list_dict:
metadata_list_dict[rd] = []
for dirpath, dirnames, filenames in os.walk(rd):
if "model_metadata.json" in filenames:
with open(os.path.join(dirpath, 'model_metadata.json')) as f:
metadata_dict = json.load(f)
metadata_list_dict[rd].append(metadata_dict)
for ss in metadata_list_dict:
for i, metadata_dict in enumerate(metadata_list_dict[ss]):
if (i % 10 == 0) and verbose:
print('Processing collection %s model %d' % (ss, i))
# Check that model has metrics before we go on
if not 'training_metrics' in metadata_dict:
continue
collection_list.append(ss)
model_uuid = metadata_dict['model_uuid']
model_uuid_list.append(model_uuid)
time_built = metadata_dict['time_built']
time_built_list.append(time_built)
model_params = metadata_dict['model_parameters']
ampl_version = model_params.get('ampl_version', 'probably 1.0.0')
ampl_version_list.append(ampl_version)
model_type = model_params['model_type']
model_type_list.append(model_type)
featurizer = model_params['featurizer']
featurizer_list.append(featurizer)
if 'descriptor_specific' in metadata_dict:
desc_type = metadata_dict['descriptor_specific']['descriptor_type']
elif featurizer in ['graphconv', 'ecfp']:
desc_type = featurizer
else:
desc_type = ''
desc_type_list.append(desc_type)
dataset_key = metadata_dict['training_dataset']['dataset_key']
bucket = metadata_dict['training_dataset']['bucket']
dataset_key_list.append(dataset_key)
bucket_list.append(bucket)
dset_metadata = metadata_dict['training_dataset']['dataset_metadata']
param = metadata_dict['training_dataset']['response_cols'][0]
param_list.append(param)
transform_type = metadata_dict['training_dataset']['feature_transform_type']
transform_list.append(transform_type)
split_params = metadata_dict['splitting_parameters']
splitter_list.append(split_params['splitter'])
split_uuid_list.append(split_params.get('split_uuid', ''))
split_strategy = split_params['split_strategy']
split_strategy_list.append(split_strategy)
if 'umap_specific' in metadata_dict:
umap_params = metadata_dict['umap_specific']
umap_dim_list.append(umap_params['umap_dim'])
umap_targ_wt_list.append(umap_params['umap_targ_wt'])
umap_neighbors_list.append(umap_params['umap_neighbors'])
umap_min_dist_list.append(umap_params['umap_min_dist'])
else:
umap_dim_list.append(nan)
umap_targ_wt_list.append(nan)
umap_neighbors_list.append(nan)
umap_min_dist_list.append(nan)
model_feat_param_list.append(extract_model_and_feature_parameters(metadata_dict))
# Get model metrics for this model
metrics_dicts = metadata_dict['training_metrics']
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
if split_strategy == 'k_fold_cv':
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['test']['num_compounds']
else:
dset_size = subset_metrics['train']['num_compounds'] + subset_metrics['valid']['num_compounds'] + subset_metrics['test']['num_compounds']
for subset in subsets:
subset_size = subset_metrics[subset]['num_compounds']
for score_type in score_types:
try:
score = subset_metrics[subset][score_type]
except KeyError:
score = float('nan')
score_dict[subset][score_type].append(score)
ncmpd_dict[subset].append(subset_size)
dset_size_list.append(dset_size)
col_dict = dict(
collection=collection_list,
ampl_version=ampl_version_list,
model_uuid=model_uuid_list,
time_built=time_built_list,
model_type=model_type_list,
featurizer=featurizer_list,
features=desc_type_list,
transformer=transform_list,
splitter=splitter_list,
split_strategy=split_strategy_list,
split_uuid=split_uuid_list,
umap_dim=umap_dim_list,
umap_targ_wt=umap_targ_wt_list,
umap_neighbors=umap_neighbors_list,
umap_min_dist=umap_min_dist_list,
dataset_bucket=bucket_list,
dataset_key=dataset_key_list,
dataset_size=dset_size_list,
parameter=param_list
)
perf_df = pd.DataFrame(col_dict)
param_df = pd.DataFrame(model_feat_param_list)
perf_df = perf_df.merge(param_df, on='model_uuid', how='inner')
for subset in subsets:
ncmpds_col = '%s_size' % subset
perf_df[ncmpds_col] = ncmpd_dict[subset]
for score_type in score_types:
metric_col = '%s_%s' % (subset, score_type)
perf_df[metric_col] = score_dict[subset][score_type]
return perf_df
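# Usage sketch for get_summary_perf_tables (collection name and filter value are hypothetical):
#
#   summary_df = get_summary_perf_tables(collection_names=['my_collection'],
#                                        filter_dict={'model_parameters.model_type': 'NN'},
#                                        prediction_type='regression')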
#------------------------------------------------------------------------------------------------------------------
def get_summary_metadata_table(uuids, collections=None):
"""
Tabulate metadata fields and performance metrics for a set of models identified by specific model_uuids.
Args:
uuids (list): List of model UUIDs to query.
collections (list or str): Names of collections in model tracker DB to get models from. If collections is
a string, it must identify one collection to search for all models. If a list, it must be of the same
length as `uuids`. If not provided, all collections will be searched.
Returns:
pd.DataFrame: Table of metadata fields and performance metrics for models.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
if isinstance(uuids,str):
uuids = [uuids]
if isinstance(collections,str):
collections = [collections] * len(uuids)
mlist = []
mlmt_client = dsf.initialize_model_tracker()
for idx,uuid in enumerate(uuids):
if collections is not None:
collection_name = collections[idx]
else:
collection_name = trkr.get_model_collection_by_uuid(uuid)
model_meta = trkr.get_full_metadata_by_uuid(uuid, collection_name=collection_name)
mdl_params = model_meta['model_parameters']
data_params = model_meta['training_dataset']
# Get model metrics for this model
metrics = pd.DataFrame(model_meta['training_metrics'])
metrics = metrics[metrics['label']=='best']
train_metrics = metrics[metrics['subset']=='train']['prediction_results'].values[0]
valid_metrics = metrics[metrics['subset']=='valid']['prediction_results'].values[0]
test_metrics = metrics[metrics['subset']=='test']['prediction_results'].values[0]
# Try to name the model something intelligible in the table
name = 'NA'
if 'target' in data_params['dataset_metadata']:
name = data_params['dataset_metadata']['target']
if (name == 'NA') & ('assay_endpoint' in data_params['dataset_metadata']):
name = data_params['dataset_metadata']['assay_endpoint']
if (name == 'NA') & ('response_col' in data_params['dataset_metadata']):
name = data_params['dataset_metadata']['response_col']
if name != 'NA':
if 'param' in data_params['dataset_metadata'].keys():
name = name + ' ' + data_params['dataset_metadata']['param']
else:
name = 'unknown'
transform = 'None'
if 'transformation' in data_params['dataset_metadata'].keys():
transform = data_params['dataset_metadata']['transformation']
if mdl_params['featurizer'] == 'computed_descriptors':
featurizer = model_meta['descriptor_specific']['descriptor_type']
else:
featurizer = mdl_params['featurizer']
try:
split_uuid = model_meta['splitting_parameters']['split_uuid']
except:
split_uuid = 'Not Available'
if mdl_params['prediction_type'] == 'regression':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
'Learning rate': xgb_params['xgb_learning_rate'],
'r^2 (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['r2_score'], valid_metrics['r2_score'], test_metrics['r2_score']),
'MAE (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['mae_score'], valid_metrics['mae_score'], test_metrics['mae_score']),
'RMSE(Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['rms_score'], valid_metrics['rms_score'], test_metrics['rms_score']),
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
architecture = 'unknown'
elif mdl_params['prediction_type'] == 'classification':
if mdl_params['model_type'] == 'NN':
nn_params = model_meta['nn_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{str(train_metrics['confusion_matrix'])}/{str(valid_metrics['confusion_matrix'])}/{str(test_metrics['confusion_matrix'])}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Layer Sizes': nn_params['layer_sizes'],
'Optimizer': nn_params['optimizer_type'],
'Learning Rate': nn_params['learning_rate'],
'Dropouts': nn_params['dropouts'],
'Best Epoch (Max)': '%i (%i)' % (nn_params['best_epoch'],nn_params['max_epochs']),
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'RF':
rf_params = model_meta['rf_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Max Depth': rf_params['rf_max_depth'],
'Max Features': rf_params['rf_max_features'],
'RF Estimators': rf_params['rf_estimators'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
elif mdl_params['model_type'] == 'xgboost':
xgb_params = model_meta['xgb_specific']
minfo = {'Name': name,
'Transformation': transform,
'AMPL version used:': mdl_params.get('ampl_version', 'probably 1.0.0'),
'Model Type (Featurizer)': '%s (%s)' % (mdl_params['model_type'],featurizer),
'Gamma': xgb_params['xgb_gamma'],
'XGB Learning rate': xgb_params['xgb_learning_rate'],
'ROC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['roc_auc_score'], valid_metrics['roc_auc_score'], test_metrics['roc_auc_score']),
'PRC AUC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['prc_auc_score'], valid_metrics['prc_auc_score'], test_metrics['prc_auc_score']),
'Balanced accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics.get('bal_accuracy', np.nan), valid_metrics.get('bal_accuracy',np.nan), test_metrics.get('bal_accuracy', np.nan)),
'Accuracy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['accuracy_score'], valid_metrics['accuracy_score'], test_metrics['accuracy_score']),
'Precision (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['precision'], valid_metrics['precision'], test_metrics['precision']),
'Recall (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['recall_score'], valid_metrics['recall_score'], test_metrics['recall_score']),
'NPV (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['npv'], valid_metrics['npv'], test_metrics['npv']),
'Kappa (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['kappa'], valid_metrics['kappa'], test_metrics['kappa']),
'Matthews CC (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['matthews_cc'], valid_metrics['matthews_cc'], test_metrics['matthews_cc']),
'Cross entropy (Train/Valid/Test)': '%0.2f/%0.2f/%0.2f' % (train_metrics['cross_entropy'], valid_metrics['cross_entropy'], test_metrics['cross_entropy']),
'Confusion matrices (Train/Valid/Test)': f"{train_metrics['confusion_matrix']}/{valid_metrics['confusion_matrix']}/{test_metrics['confusion_matrix']}",
'Data Size (Train/Valid/Test)': '%i/%i/%i' % (train_metrics["num_compounds"],valid_metrics["num_compounds"],test_metrics["num_compounds"]),
'Splitter': model_meta['splitting_parameters']['splitter'],
'Collection': collection_name,
'UUID': model_meta['model_uuid'],
'Split UUID': split_uuid,
'Dataset Key': data_params['dataset_key']}
else:
architecture = 'unknown'
mlist.append(OrderedDict(minfo))
return pd.DataFrame(mlist).set_index('Name').transpose()
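# Usage sketch for get_summary_metadata_table (UUIDs and collection name are hypothetical):
#
#   summary = get_summary_metadata_table(['model-uuid-1', 'model-uuid-2'], collections='my_collection')
#   # The result is transposed: one column per model, one row per metadata/metric field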
#------------------------------------------------------------------------------------------------------------------
def get_training_datasets(collection_names):
"""
Query the model tracker DB for all the unique dataset keys and buckets used to train models in the given
collections.
Args:
collection_names (list): List of names of model tracker collections to search for models.
Returns:
dict: Dictionary mapping collection names to lists of (dataset_key, bucket) tuples for training sets.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = mlmt_client.model.get_training_datasets(collection_name=collection_name).result()
result_dict[collection_name] = dset_list
return result_dict
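# Usage sketch for get_training_datasets (the collection name is hypothetical):
#
#   dsets = get_training_datasets(['my_collection'])
#   # dsets['my_collection'] lists the training dataset (dataset_key, bucket) entries for that collection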
#------------------------------------------------------------------------------------------------------------------
def get_dataset_models(collection_names, filter_dict={}):
"""
Query the model tracker for all models saved in the model tracker DB under the given collection names. Returns a dictionary
mapping (dataset_key,bucket) pairs to the list of (collection,model_uuid) pairs trained on the corresponding datasets.
Args:
collection_names (list): List of names of model tracker collections to search for models.
filter_dict (dict): Additional filter criteria to use in model query.
Returns:
dict: Dictionary mapping training set (dataset_key, bucket) tuples to (collection, model_uuid) pairs.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
result_dict = {}
coll_dset_dict = get_training_datasets(collection_names)
mlmt_client = dsf.initialize_model_tracker()
for collection_name in collection_names:
dset_list = coll_dset_dict[collection_name]
for dset_dict in dset_list:
bucket = dset_dict['bucket']
dset_key = dset_dict['dataset_key']
query_filter = {
'training_dataset.bucket': bucket,
'training_dataset.dataset_key': dset_key
}
query_filter.update(filter_dict)
query_params = {
"match_metadata": query_filter
}
print('Querying models in collection %s for dataset %s, %s' % (collection_name, bucket, dset_key))
metadata_list = mlmt_client.model.query_model_metadata(
collection_name=collection_name,
query_params=query_params,
include_fields=['model_uuid']
).result()
for i, metadata_dict in enumerate(metadata_list):
if i % 50 == 0:
print('Processing collection %s model %d' % (collection_name, i))
model_uuid = metadata_dict['model_uuid']
result_dict.setdefault((dset_key,bucket), []).append((collection_name, model_uuid))
return result_dict
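# Usage sketch for get_dataset_models (the collection name is hypothetical):
#
#   dset_models = get_dataset_models(['my_collection'])
#   # Maps each (dataset_key, bucket) pair to the (collection, model_uuid) pairs trained on it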
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files(result_dir, pred_type='regression'):
"""
Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
Format the per-task performance metrics in a table with a row for each task and columns for each model/subset
combination.
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
raise Exception("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
# Format the data as a table with groups of 3 columns for each model
num_models = len(model_uuid_list)
if pred_type == 'regression':
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_r2_score']
else:
model_params = ['model_uuid', 'learning_rate', 'layer_sizes', 'dropouts', 'max_epochs', 'best_epoch',
'subset', 'num_compounds', 'mean_roc_auc_score']
param_list = model_params + response_cols
perf_df = pd.DataFrame(dict(col_0=param_list))
colnum = 0
for i in range(num_models):
for subset in subsets:
vals = []
if subset == 'train':
vals.append(model_uuid_list[i])
vals.append(learning_rate_list[i])
vals.append(layer_sizes_list[i])
vals.append(dropouts_list[i])
vals.append('%d' % max_epochs_list[i])
vals.append('%d' % best_epoch_list[i])
else:
vals = vals + ['']*6
vals.append(subset)
vals.append('%d' % score_dict[subset]['num_compounds'][i])
if pred_type == 'regression':
vals.append('%.3f' % score_dict[subset]['r2_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_r2_scores'][i]]
else:
vals.append('%.3f' % score_dict[subset]['roc_auc_score'][i])
vals = vals + ['%.3f' % v for v in score_dict[subset]['task_roc_auc_scores'][i]]
colnum += 1
colname = 'col_%d' % colnum
perf_df[colname] = vals
return perf_df
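# Illustrative usage sketch (not part of the original module): how the per-task
# table from get_multitask_perf_from_files might be generated and saved. The
# result directory path and output filename are placeholders.
def _example_multitask_perf_from_files():
    perf_df = get_multitask_perf_from_files('/path/to/hp_search_results', pred_type='regression')
    # col_0 holds the row labels (model parameters followed by one row per task);
    # each remaining column is one model/subset combination.
    perf_df.to_csv('multitask_perf_by_task.csv', index=False)
    return perf_df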
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_files_new(result_dir, pred_type='regression'):
"""
    Retrieve model metadata and performance metrics stored in the filesystem from a multitask hyperparameter search.
    Format the results as a table with one row per model and one column per subset/task performance metric
    (R2 and RMS scores for regression models, ROC AUC scores for classification models).
Args:
result_dir (str): Path to root result directory containing output from a hyperparameter search run.
pred_type (str): Prediction type ('classification' or 'regression') of models to query.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
model_uuid_list = []
learning_rate_list = []
dropouts_list = []
layer_sizes_list = []
best_epoch_list = []
max_epochs_list = []
featurizer_list = []
subsets = ['train', 'valid', 'test']
if pred_type == 'regression':
metrics = ['num_compounds', 'r2_score', 'task_r2_scores',
'task_rms_scores']
else:
metrics = ['num_compounds', 'roc_auc_score', 'task_roc_auc_scores']
score_dict = {}
for subset in subsets:
score_dict[subset] = {}
for metric in metrics:
score_dict[subset][metric] = []
# Navigate the results directory tree
model_list = []
metrics_list = []
for dirpath, dirnames, filenames in os.walk(result_dir):
if ('model_metadata.json' in filenames) and ('model_metrics.json' in filenames):
meta_path = os.path.join(dirpath, 'model_metadata.json')
with open(meta_path, 'r') as meta_fp:
meta_dict = json.load(meta_fp)
model_list.append(meta_dict)
metrics_path = os.path.join(dirpath, 'model_metrics.json')
with open(metrics_path, 'r') as metrics_fp:
metrics_dicts = json.load(metrics_fp)
metrics_list.append(metrics_dicts)
print("Found data for %d models under %s" % (len(model_list), result_dir))
for metadata_dict, metrics_dicts in zip(model_list, metrics_list):
model_uuid = metadata_dict['model_uuid']
#print("Got metadata for model UUID %s" % model_uuid)
# Get list of training run metrics for this model
#print("Got %d metrics dicts for model %s" % (len(metrics_dicts), model_uuid))
if len(metrics_dicts) < 3:
raise Exception("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#print("Got no or incomplete metrics for model %s, skipping..." % model_uuid)
#continue
subset_metrics = {}
for metrics_dict in metrics_dicts:
if metrics_dict['label'] == 'best':
subset = metrics_dict['subset']
subset_metrics[subset] = metrics_dict['prediction_results']
model_uuid_list.append(model_uuid)
model_params = metadata_dict['model_parameters']
dset_params = metadata_dict['training_dataset']
response_cols = dset_params['response_cols']
nn_params = metadata_dict['nn_specific']
max_epochs_list.append(nn_params['max_epochs'])
best_epoch_list.append(nn_params['best_epoch'])
learning_rate_list.append(nn_params['learning_rate'])
layer_sizes_list.append(','.join(['%d' % s for s in nn_params['layer_sizes']]))
dropouts_list.append(','.join(['%.2f' % d for d in nn_params['dropouts']]))
featurizer_list.append(model_params["featurizer"])
for subset in subsets:
for metric in metrics:
score_dict[subset][metric].append(subset_metrics[subset][metric])
    # Format the data as a table with one row per model and one column per subset/task metric
num_models = len(model_uuid_list)
data = {
"model_uuid": model_uuid_list,
"learning_rate": learning_rate_list,
"layer_sizes": layer_sizes_list,
"dropouts": dropouts_list,
"featurizer": featurizer_list
}
for i in range(num_models):
for subset in subsets:
for ix, task in enumerate(response_cols):
if pred_type == "regression":
colr2 = f"{subset}_{task}_r2"
colrms = f"{subset}_{task}_rms"
if colr2 not in data:
data[colr2] = []
data[colrms] = []
data[colr2].append(score_dict[subset]["task_r2_scores"][i][ix])
data[colrms].append(score_dict[subset]["task_rms_scores"][i][ix])
else:
colauc = f"{subset}_{task}_roc_auc"
if colauc not in data:
data[colauc] = []
data[colauc].append(score_dict[subset]["task_roc_auc_scores"][i][ix])
perf_df = pd.DataFrame(data)
return perf_df
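# Illustrative usage sketch (not part of the original module): the "new" variant
# returns one row per model, which makes ranking models straightforward. The
# result directory path is a placeholder.
def _example_multitask_perf_from_files_new():
    perf_df = get_multitask_perf_from_files_new('/path/to/hp_search_results', pred_type='regression')
    # Column names follow the f"{subset}_{task}_r2" / f"{subset}_{task}_rms" patterns built above.
    r2_cols = [c for c in perf_df.columns if c.startswith('valid_') and c.endswith('_r2')]
    return perf_df.sort_values(by=r2_cols[0], ascending=False) if r2_cols else perf_df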
#-------------------------------------------------------------------------------------------------------------------
def get_multitask_perf_from_tracker(collection_name, response_cols=None, expand_responses=None, expand_subsets='test',
exhaustive=False):
"""
Retrieve full metadata and metrics from model tracker for all models in a collection and format them
into a table, including per-task performance metrics for multitask models.
Meant for multitask NN models, but works for single task models as well.
By AKP. Works for model tracker as of 10/2020
Args:
collection_name (str): Name of model tracker collection to search for models.
        response_cols (list, str or None): Names of tasks (response columns) to query performance results for.
            If None, the function checks whether every model in the collection shares the same response columns
            and uses those; otherwise it raises an error listing the possible sets so you can choose one.
            May also be given as a list of strings or a comma-separated string. Note: make sure response cols are
            listed in the same order as in the metadata. Recommended: run with None first, then narrow down.
expand_responses (list, str or None): Names of tasks / response columns you want to include results for in
the final dataframe. Useful if you have a lot of tasks and only want to look at the performance of a
few of them. Must also be a list or comma separated string, and must be a subset of response_cols.
If None, will expand all responses.
expand_subsets (list, str or None): Dataset subsets ('train', 'valid' and/or 'test') to show metrics for.
Again, must be list or comma separated string, or None to expand all.
exhaustive (bool): If True, return large dataframe with all model tracker metadata minus any columns not
in expand_responses. If False, return trimmed dataframe with most relevant columns.
Returns:
pd.DataFrame: Table of model metadata fields and performance metrics.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return None
# check inputs are correct
if collection_name.startswith('old_'):
raise Exception("This function is not implemented for the old format of metadata.")
if isinstance(response_cols, list):
pass
elif response_cols is None:
pass
elif isinstance(response_cols, str):
response_cols=[x.strip() for x in response_cols.split(',')]
else:
raise Exception("Please input response cols as None, list or comma separated string.")
if isinstance(expand_responses, list):
pass
elif expand_responses is None:
pass
elif isinstance(expand_responses, str):
expand_responses=[x.strip() for x in expand_responses.split(',')]
else:
raise Exception("Please input expand response col(s) as list or comma separated string.")
if isinstance(expand_subsets, list):
pass
elif expand_subsets is None:
pass
elif isinstance(expand_subsets, str):
expand_subsets=[x.strip() for x in expand_subsets.split(',')]
else:
raise Exception("Please input subset(s) as list or comma separated string.")
# get metadata
if response_cols is not None:
filter_dict={'training_dataset.response_cols': response_cols}
else:
filter_dict={}
models = trkr.get_full_metadata(filter_dict, collection_name)
if len(models)==0:
raise Exception("No models found with these response cols in this collection. To get a list of possible response cols, pass response_cols=None.")
models = pd.DataFrame.from_records(models)
# expand model metadata - deal with NA descriptors / NA other fields
alldat=models[['model_uuid', 'time_built']]
models=models.drop(['model_uuid', 'time_built'], axis = 1)
for column in models.columns:
if column == 'training_metrics':
continue
nai=models[models[column].isna()].index
nonas=models[~models[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
alldat=alldat.join(tempdf)
# assign response cols
if len(alldat.response_cols.astype(str).unique())==1:
response_cols=alldat.response_cols[0]
print("Response cols:", response_cols)
else:
raise Exception(f"There is more than one set of response cols in this collection. Please choose from these lists: {alldat.response_cols.unique()}")
# expand training metrics - deal with NA's in columns
metrics=pd.DataFrame.from_dict(models['training_metrics'].tolist())
allmet=alldat[['model_uuid']]
for column in metrics.columns:
nai=metrics[metrics[column].isna()].index
nonas=metrics[~metrics[column].isna()]
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
        label = tempdf['label'][nonas.index[0]]
        metrics_type = tempdf['metrics_type'][nonas.index[0]]
        subset = tempdf['subset'][nonas.index[0]]
        nai = tempdf[tempdf['prediction_results'].isna()].index
        nonas = tempdf[~tempdf['prediction_results'].isna()]
        tempdf = pd.DataFrame.from_records(nonas['prediction_results'].tolist(), index=nonas.index)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
tempdf=tempdf.add_prefix(f'{label}_{subset}_')
allmet=allmet.join(tempdf, lsuffix='', rsuffix="_2")
alldat=alldat.merge(allmet, on='model_uuid')
# expand task level training metrics for subset(s) of interest - deal w/ NA values
if expand_subsets is None:
expand_subsets=['train', 'valid', 'test']
for sub in expand_subsets:
listcols=alldat.columns[alldat.columns.str.contains("task")& alldat.columns.str.contains(sub)]
for column in listcols:
colnameslist=[]
for task in response_cols:
colnameslist.append(f'{column}_{task}')
nai=alldat[alldat[column].isna()].index
nonas=alldat[~alldat[column].isna()]
if isinstance(nonas.loc[nonas.index[0],column], list):
tempdf=pd.DataFrame.from_records(nonas[column].tolist(), index= nonas.index, columns=colnameslist)
tempdf=pd.concat([tempdf, pd.DataFrame(np.nan, index=nai, columns=tempdf.columns)])
alldat = alldat.join(tempdf)
alldat=alldat.drop(columns=column)
else:
print(f"Warning: task-level metadata for {column} not in metadata.")
# make features column
alldat['features'] = alldat['featurizer']
if 'descriptor_type' in alldat.columns:
alldat.loc[alldat.featurizer == 'computed_descriptors', 'features'] = alldat.loc[alldat.featurizer == 'computed_descriptors', 'descriptor_type']
# prune to only include expand_responses
if expand_responses is not None:
removecols= [x for x in response_cols if x not in expand_responses]
for col in removecols:
alldat=alldat.drop(columns=alldat.columns[alldat.columns.str.contains(col)])
# return or prune further and then return
if exhaustive:
return alldat
else:
alldat=alldat.drop(columns=alldat.columns[alldat.columns.str.contains('baseline')])
keepcols=['ampl_version','model_uuid', 'features', 'prediction_type',
'transformers', 'uncertainty', 'batch_size', 'bias_init_consts',
'dropouts', 'layer_sizes', 'learning_rate', 'max_epochs', 'optimizer_type',
'weight_decay_penalty', 'weight_decay_penalty_type', 'weight_init_stddevs', 'splitter',
'split_uuid', 'split_test_frac', 'split_valid_frac', 'smiles_col', 'id_col',
'feature_transform_type', 'response_cols', 'response_transform_type', 'num_model_tasks',
'rf_estimators', 'rf_max_depth', 'rf_max_features', 'xgb_gamma', 'xgb_learning_rate']
keepcols.extend(alldat.columns[alldat.columns.str.contains('best')])
keepcols = list(set(alldat.columns).intersection(keepcols))
keepcols.sort()
alldat=alldat[keepcols]
if sum(alldat.columns.str.contains('_2'))>0:
print("Warning: One or more of your models has metadata for >1 best / >1 baseline epochs.")
return alldat
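# Illustrative usage sketch (not part of the original module): pull a trimmed
# performance table for one collection from the model tracker. The collection
# name is a placeholder and a model tracker environment is required.
def _example_multitask_perf_from_tracker():
    perf_df = get_multitask_perf_from_tracker(
        'my_collection',            # hypothetical collection name
        response_cols=None,         # let the function verify/report the collection's response cols
        expand_responses=None,
        expand_subsets='test',
        exhaustive=False)
    return perf_df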
#-------------------------------------------------------------------------------------------------------------------
def _aggregate_predictions(datasets, bucket, col_names, result_dir):
"""
Run predictions for best dataset/model_type/split_type/featurizer (max r2 score) and save csv's in /usr/local/data/
DEPRECATED: Will not work in current software environment. Needs to be updated
Args:
datasets (list): List of (dataset_key, bucket) tuples to query models for.
bucket (str): Ignored.
col_names (list): List of names of model tracker collections to search for models.
result_dir (str): Ignored.
Returns:
None.
Todo:
Update for current software environment, or delete function if it's not useful.
"""
if not mlmt_supported:
print("Model tracker not supported in your environment; can examine models saved in filesystem only.")
return
results = []
mlmt_client = dsf.initialize_model_tracker()
for dset_key, bucket in datasets:
for model_type in ['NN', 'RF']:
for split_type in ['scaffold', 'random']:
for descriptor_type in ['mordred_filtered', 'moe', 'rdkit_raw']:
model_filter = {"training_dataset.dataset_key" : dset_key,
"training_dataset.bucket" : bucket,
"ModelMetrics.TrainingRun.label" : "best",
'ModelMetrics.TrainingRun.subset': 'valid',
'ModelMetrics.TrainingRun.PredictionResults.r2_score': ['max', None],
'model_parameters.model_type': model_type,
'model_parameters.featurizer': 'computed_descriptors',
'descriptor_specific.descriptor_type': descriptor_type,
'splitting_parameters.splitter': split_type
}
for col_name in col_names:
model = list(trkr.get_full_metadata(model_filter, collection_name=col_name))
if model:
model = model[0]
                            result_dir = '/usr/local/data/%s/%s' % (col_name, dset_key.replace('.csv', ''))  # strip the extension, not trailing characters
result_df = mp.regenerate_results(result_dir, metadata_dict=model)
result_df['dset_key'] = dset_key
actual_col = [col for col in result_df.columns if 'actual' in col][0]
pred_col = [col for col in result_df.columns if 'pred' in col][0]
result_df['error'] = abs(result_df[actual_col] - result_df[pred_col])
                            result_df['cind'] = pd.Categorical(result_df['dset_key'])
import json
from itertools import product
from unittest.mock import ANY, MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
ExplainPredictionsStage,
abs_error,
cross_entropy,
explain_predictions,
explain_predictions_best_worst,
)
from evalml.pipelines import (
BinaryClassificationPipeline,
MulticlassClassificationPipeline,
RegressionPipeline,
TimeSeriesBinaryClassificationPipeline,
TimeSeriesRegressionPipeline,
)
from evalml.pipelines.components.utils import _all_estimators
from evalml.problem_types import ProblemTypes, is_binary, is_multiclass
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
def test_error_metrics():
np.testing.assert_array_equal(
abs_error(pd.Series([1, 2, 3]), pd.Series([4, 1, 0])), np.array([3, 1, 3])
)
np.testing.assert_allclose(
cross_entropy(
pd.Series([1, 0]), pd.DataFrame({"a": [0.1, 0.2], "b": [0.9, 0.8]})
),
np.array([-np.log(0.9), -np.log(0.2)]),
)
input_features_and_y_true = [
(
[[1]],
pd.Series([1]),
"^Input features must be a dataframe with more than 10 rows!",
),
(
pd.DataFrame({"a": [1]}),
pd.Series([1]),
"^Input features must be a dataframe with more than 10 rows!",
),
(
pd.DataFrame({"a": range(15)}),
pd.Series(range(12)),
"^Parameters y_true and input_features must have the same number of data points.",
),
]
@pytest.mark.parametrize(
"input_features,y_true,error_message", input_features_and_y_true
)
def test_explain_predictions_best_worst_value_errors(
input_features, y_true, error_message
):
with pytest.raises(ValueError, match=error_message):
explain_predictions_best_worst(None, input_features, y_true)
def test_explain_predictions_raises_pipeline_score_error():
with pytest.raises(PipelineScoreError, match="Division by zero!"):
def raise_zero_division(input_features):
raise ZeroDivisionError("Division by zero!")
pipeline = MagicMock()
pipeline.problem_type = ProblemTypes.BINARY
pipeline.predict_proba.side_effect = raise_zero_division
explain_predictions_best_worst(
pipeline, pd.DataFrame({"a": range(15)}), pd.Series(range(15))
)
def test_explain_predictions_value_errors():
with pytest.raises(
ValueError, match="Parameter input_features must be a non-empty dataframe."
):
        explain_predictions(MagicMock(), pd.DataFrame())
import pandas as pd
import pandas.testing as pdt
import pytest
import pytz
from werkzeug.exceptions import RequestEntityTooLarge
from sfa_api.conftest import (
VALID_FORECAST_JSON, VALID_CDF_FORECAST_JSON, demo_forecasts)
from sfa_api.utils import request_handling
from sfa_api.utils.errors import (
BadAPIRequest, StorageAuthError, NotFoundException)
@pytest.mark.parametrize('start,end', [
('invalid', 'invalid'),
('NaT', 'NaT')
])
def test_validate_start_end_fail(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('start,end', [
('20190101T120000Z', '20190101T130000Z'),
('20190101T120000', '20190101T130000'),
('20190101T120000', '20190101T130000Z'),
('20190101T120000Z', '20190101T130000+00:00'),
('20190101T120000Z', '20190101T140000+01:00'),
])
def test_validate_start_end_success(app, forecast_id, start, end):
url = f'/forecasts/single/{forecast_id}/values?start={start}&end={end}'
with app.test_request_context(url):
request_handling.validate_start_end()
@pytest.mark.parametrize('query,exc', [
('?start=20200101T0000Z', {'end'}),
('?end=20200101T0000Z', {'start'}),
('?start=20200101T0000Z&end=20210102T0000Z', {'end'}),
('', {'start', 'end'}),
pytest.param('?start=20200101T0000Z&end=20200102T0000Z', {},
marks=pytest.mark.xfail(strict=True))
])
def test_validate_start_end_not_provided(app, forecast_id, query, exc):
url = f'/forecasts/single/{forecast_id}/values{query}'
with app.test_request_context(url):
with pytest.raises(BadAPIRequest) as err:
request_handling.validate_start_end()
if exc:
assert set(err.value.errors.keys()) == exc
@pytest.mark.parametrize('content_type,payload', [
('text/csv', ''),
('application/json', '{}'),
('application/json', '{"values": "nope"}'),
('text/plain', 'nope'),
])
def test_validate_parsable_fail(app, content_type, payload, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(request_handling.BadAPIRequest):
with app.test_request_context(
url, content_type=content_type, data=payload, method='POST',
content_length=len(payload)):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type', [
('text/csv'),
('application/json'),
('application/json'),
])
def test_validate_parsable_fail_too_large(app, content_type, forecast_id):
url = f'/forecasts/single/{forecast_id}/values/'
with pytest.raises(RequestEntityTooLarge):
with app.test_request_context(
url, content_type=content_type, method='POST',
content_length=17*1024*1024):
request_handling.validate_parsable_values()
@pytest.mark.parametrize('content_type,payload', [
('text/csv', 'timestamp,value\n2019-01-01T12:00:00Z,5'),
('application/json', ('{"values":[{"timestamp": "2019-01-01T12:00:00Z",'
'"value": 5}]}')),
])
def test_validate_parsable_success(app, content_type, payload, forecast_id):
with app.test_request_context(f'/forecasts/single/{forecast_id}/values/',
content_type=content_type, data=payload,
method='POST'):
request_handling.validate_parsable_values()
def test_validate_observation_values():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
request_handling.validate_observation_values(df)
def test_validate_observation_values_bad_value():
df = pd.DataFrame({'value': [0.1, 's.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_no_value():
df = pd.DataFrame({'quality_flag': [0.0, 1],
'timestamp': ['20190101T0000Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'value' in e.value.errors
def test_validate_observation_values_bad_timestamp():
df = pd.DataFrame({'value': [0.1, '.2'],
'quality_flag': [0.0, 1],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
def test_validate_observation_values_no_timestamp():
df = pd.DataFrame({
'value': [0.1, '.2'], 'quality_flag': [0.0, 1]})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'timestamp' in e.value.errors
@pytest.mark.parametrize('quality', [
[1, .1],
[1, '0.9'],
[2, 0],
['ham', 0]
])
def test_validate_observation_values_bad_quality(quality):
df = pd.DataFrame({'value': [0.1, .2],
'quality_flag': quality,
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
def test_validate_observation_values_no_quality():
df = pd.DataFrame({'value': [0.1, '.2'],
'timestamp': ['20190101T008Z',
'2019-01-01T03:00:00+07:00']})
with pytest.raises(BadAPIRequest) as e:
request_handling.validate_observation_values(df)
assert 'quality_flag' in e.value.errors
expected_parsed_df = pd.DataFrame({
'a': [1, 2, 3, 4],
'b': [4, 5, 6, 7],
})
csv_string = "a,b\n1,4\n2,5\n3,6\n4,7\n"
json_string = '{"values":{"a":[1,2,3,4],"b":[4,5,6,7]}}'
def test_parse_csv_success():
test_df = request_handling.parse_csv(csv_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('csv_input', [
'',
"a,b\n1,4\n2.56,2.45\n1,2,3\n"
])
def test_parse_csv_failure(csv_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_csv(csv_input)
def test_parse_json_success():
test_df = request_handling.parse_json(json_string)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('json_input', [
'',
"{'a':[1,2,3]}"
])
def test_parse_json_failure(json_input):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_json(json_input)
null_df = pd.DataFrame({
'timestamp': [
'2018-10-29T12:00:00Z',
'2018-10-29T13:00:00Z',
'2018-10-29T14:00:00Z',
'2018-10-29T15:00:00Z',
],
'value': [32.93, 25.17, None, None],
'quality_flag': [0, 0, 1, 0]
})
def test_parse_csv_nan():
test_df = request_handling.parse_csv("""
# comment line
timestamp,value,quality_flag
2018-10-29T12:00:00Z,32.93,0
2018-10-29T13:00:00Z,25.17,0
2018-10-29T14:00:00Z,,1 # this value is NaN
2018-10-29T15:00:00Z,NaN,0
""")
pdt.assert_frame_equal(test_df, null_df)
def test_parse_json_nan():
test_df = request_handling.parse_json("""
{"values":[
{"timestamp": "2018-10-29T12:00:00Z", "value": 32.93, "quality_flag": 0},
{"timestamp": "2018-10-29T13:00:00Z", "value": 25.17, "quality_flag": 0},
{"timestamp": "2018-10-29T14:00:00Z", "value": null, "quality_flag": 1},
{"timestamp": "2018-10-29T15:00:00Z", "value": null, "quality_flag": 0}
]}
""")
pdt.assert_frame_equal(test_df, null_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'text/csv'),
(csv_string, 'application/vnd.ms-excel'),
(json_string, 'application/json')
])
def test_parse_values_success(app, data, mimetype):
with app.test_request_context():
test_df = request_handling.parse_values(data, mimetype)
pdt.assert_frame_equal(test_df, expected_parsed_df)
@pytest.mark.parametrize('data,mimetype', [
(csv_string, 'application/fail'),
(json_string, 'image/bmp'),
])
def test_parse_values_failure(data, mimetype):
with pytest.raises(request_handling.BadAPIRequest):
request_handling.parse_values(data, mimetype)
@pytest.mark.parametrize('dt_string,expected', [
('20190101T1200Z', pd.Timestamp('20190101T1200Z')),
('20190101T1200', pd.Timestamp('20190101T1200Z')),
('20190101T1200+0700', pd.Timestamp('20190101T0500Z'))
])
def test_parse_to_timestamp(dt_string, expected):
parsed_dt = request_handling.parse_to_timestamp(dt_string)
assert parsed_dt == expected
@pytest.mark.parametrize('dt_string', [
'invalid datetime',
'21454543251345234',
'20190101T2500Z',
'NaT',
])
def test_parse_to_timestamp_error(dt_string):
with pytest.raises(ValueError):
request_handling.parse_to_timestamp(dt_string)
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1150Z')),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0400Z']), 120, None),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0001Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z')),
# out of order
pytest.param(
pd.DatetimeIndex(['2019-09-01T0013Z', '2019-09-01T0006Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-08-31T2352Z'), marks=pytest.mark.xfail),
(pd.date_range(start='2019-03-10 00:00', end='2019-03-10 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.date_range(start='2019-11-03 00:00', end='2019-11-03 05:00',
tz='America/Denver', freq='1h'),
60, None), # DST transition
(pd.DatetimeIndex(['2019-01-01T000132Z']), 33, None),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000132Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2019-01-02T000132Z'))
])
def test_validate_index_period(index, interval_length, previous_time):
request_handling.validate_index_period(index, interval_length,
previous_time)
def test_validate_index_empty():
with pytest.raises(request_handling.BadAPIRequest):
request_handling.validate_index_period(pd.DatetimeIndex([]), 10,
None)
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0200Z',
'2019-09-01T0300Z']), 60),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0300Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='20min'), 10),
])
def test_validate_index_period_missing(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'Missing' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0200Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0045Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='5min'), 10),
])
def test_validate_index_period_extra(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'extra' in errs[0]
@pytest.mark.parametrize('index,interval_length', [
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0100Z',
'2019-09-01T0201Z']), 120),
(pd.DatetimeIndex(['2019-09-01T0000Z', '2019-09-01T0030Z',
'2019-09-01T0130Z']), 30),
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1305Z',
freq='5min'), 10),
])
def test_validate_index_period_other(index, interval_length):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
index[0])
errs = e.value.errors['timestamp']
assert len(errs) > 0
@pytest.mark.parametrize('index,interval_length,previous_time', [
(pd.date_range(start='2019-09-01T1200Z', end='2019-09-01T1300Z',
freq='10min'), 10, pd.Timestamp('2019-09-01T1155Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0011Z',
'2019-09-01T0016Z']),
5,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-09-01T0006Z', '2019-09-01T0013Z',
'2019-09-01T0020Z']),
7,
pd.Timestamp('2019-09-01T0000Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2018-12-01T000232Z')),
(pd.DatetimeIndex(['2019-01-01T000132Z']), 30,
pd.Timestamp('2020-12-01T000232Z'))
])
def test_validate_index_period_previous(index, interval_length, previous_time):
with pytest.raises(request_handling.BadAPIRequest) as e:
request_handling.validate_index_period(index, interval_length,
previous_time)
errs = e.value.errors['timestamp']
assert len(errs) == 1
assert 'previous time' in errs[0]
@pytest.mark.parametrize('ep,res', [
('{"restrict_upload": true}', True),
('{"restrict_upload": true, "other_key": 1}', True),
('{"restrict_upload" : true}', True),
('{"restrict_upload" : True}', True),
('{"restrict_upload": 1}', False),
('{"restrict_upload": false}', False),
('{"restrict_uploa": true}', False),
('{"upload_restrict_upload": true}', False),
])
def test__restrict_in_extra(ep, res):
assert request_handling._restrict_in_extra(ep) is res
def test__current_utc_timestamp():
t = request_handling._current_utc_timestamp()
assert isinstance(t, pd.Timestamp)
assert t.tzinfo == pytz.utc
def test_restrict_upload_window_noop():
assert request_handling.restrict_forecast_upload_window(
'', None, None) is None
@pytest.mark.parametrize('now,first', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T00:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:01Z'), pd.Timestamp('2019-11-02T13:00Z')),
])
def test_restrict_upload_window(mocker, now, first):
fxd = VALID_FORECAST_JSON.copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
@pytest.mark.parametrize('now,first', [
(pd.Timestamp('2019-11-01T11:59Z'), pd.Timestamp('2019-11-01T13:00Z')),
(pd.Timestamp('2019-11-01T12:00Z'), pd.Timestamp('2019-11-01T13:00Z')),
# as currently and previously (pre-rc1) implemented in core, midnight is a
# valid forecast init time even if doesn't start until mid-day
    # pre-rc1 did not include 11/1 00:00, but would include 11/2 00:00 in issue times
(pd.Timestamp('2019-11-01T00:00Z'), pd.Timestamp('2019-11-01T01:00Z')),
(pd.Timestamp('2019-11-01T22:01Z'), pd.Timestamp('2019-11-02T00:00Z')),
(pd.Timestamp('2019-11-01T23:20Z'), pd.Timestamp('2019-11-02T01:00Z'))
])
def test_restrict_upload_window_freq(mocker, now, first):
fxd = demo_forecasts['f8dd49fa-23e2-48a0-862b-ba0af6dec276'].copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
def test_restrict_upload_window_cdf_dict(mocker):
now = pd.Timestamp('2019-11-01T11:59Z')
first = pd.Timestamp('2019-11-01T13:00Z')
fxd = VALID_CDF_FORECAST_JSON.copy()
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
request_handling.restrict_forecast_upload_window(ep, lambda: fxd, first)
def test_restrict_upload_window_cant_get(mocker):
now = pd.Timestamp('2019-11-01T11:59Z')
first = pd.Timestamp('2019-11-01T13:00Z')
ep = '{"restrict_upload": true}'
mocker.patch(
'sfa_api.utils.request_handling._current_utc_timestamp',
return_value=now)
get = mocker.MagicMock(side_effect=StorageAuthError)
with pytest.raises(NotFoundException):
request_handling.restrict_forecast_upload_window(ep, get, first)
@pytest.mark.parametrize('now,first', [
    (pd.Timestamp('2019-11-01T11:59Z'),
"""
Functions for calculating results from trained neural networks.
"""
# ---------------------------------- Imports ----------------------------------
# Python libraries
from datetime import datetime
import time
import numpy as np
import pandas as pd
from math import sqrt
from sklearn.metrics import confusion_matrix, roc_curve, auc
# ----------------------------- Class definitions -----------------------------
class ModelResults():
"""
This class calculates results from the trained EventNN model.
"""
def __init__(self, index):
self.run_index = index
self.time_start = 0
self.time_elapsed = 0
def start_timer(self):
"""
Begin timing code execution.
"""
self.time_start = time.time()
def stop_timer(self, verbose=True):
"""
Stop programme timer and optionally print the run time.
Parameters
----------
verbose : TYPE, bool
Print run time. The default is True.
"""
self.time_elapsed = time.time() - self.time_start
if verbose:
print(f' Run {self.run_index} time: {self.time_elapsed:0.2f}s')
            if not getattr(self, 'test_passed', True):  # verify_training() may not have been called yet
print(' Training failed, model rejected')
def verify_training(self, neural_net, uniques_limit=250):
"""
Check that the model correctly trained.
1) look at the number of unique predictions made by the model on the
test dataset.
Parameters
----------
neural_net : EventNN, JetRNN, CombinedNN
Keras model class.
uniques_limit : int, optional
Limit on the number of unique model predictions in order to say
that the model successfully trained. The default is 250.
Returns
-------
bool
Returns True is model successfully trained, else False.
"""
self.test_passed = True
test_predictions = neural_net.predict_test_data()
num_unique = len(np.unique(test_predictions, axis=0))
max_pred = test_predictions.max()
min_pred = test_predictions.min()
print(f' len unique predictions: {num_unique}')
print(f' min/max prediction: {min_pred:0.4f}, {max_pred:0.4f}')
if num_unique <= uniques_limit:
self.test_passed = False
if min_pred >= 0.15 or max_pred <= 0.85:
self.test_passed = False
return self.test_passed
def training_history(self, history):
"""
        Add training history to ModelResults object.
Parameters
----------
history : tensorflow.python.keras.callbacks.History
Model training history.
"""
self.history_training_data = history.history['accuracy']
self.history_test_data = history.history['val_accuracy']
self.accuracy_training = history.history['accuracy'][-1]
self.accuracy_test = history.history['val_accuracy'][-1]
self.accuracy_training_start = history.history['accuracy'][0]
self.accuracy_test_start = history.history['val_accuracy'][0]
def confusion_matrix(self, neural_net, cutoff_threshold):
"""
Calculate confusion matrix and confusion matrix derived results.
"""
# Get model predictions and convert predictions into binary values
test_predictions = neural_net.predict_test_data()
labels_pred_binary = np.where(test_predictions > cutoff_threshold, 1, 0)
        # Make confusion matrix
        cm = confusion_matrix(neural_net.labels_test(), labels_pred_binary)
        # Rotate 180 degrees so the layout becomes [[TP, FN], [FP, TN]]
        cm = np.rot90(cm, 2)
print(cm)
self.confusion_matrix = cm
TP = self.confusion_matrix[0, 0]
TN = self.confusion_matrix[1, 1]
FP = self.confusion_matrix[1, 0]
FN = self.confusion_matrix[0, 1]
accuracy = (TP+TN)/(TP+FP+FN+TN)
recall = TP/(TP+FN)
precision = TP/(TP+FP)
f_score = 2*(recall * precision) / (recall + precision)
self.cm_accuracy = accuracy
self.cm_recall = recall
self.cm_precision = precision
self.cm_f_score = f_score
self.cm_cutoff_threshold = cutoff_threshold
def roc_curve(self, neural_net, sample_vals=250):
"""
Produce the receiver operating characteristic curve (ROC curve) values.
Parameters
----------
neural_net : EventNN, JetRNN, CombinedNN
Keras model class.
sample_vals : int, optional
The number of ROC curve values to keep. Saves a random sample
of the full set of values. The default is 250.
"""
test_predictions = neural_net.predict_test_data()
fpr, tpr, thresholds = roc_curve(neural_net.labels_test(), test_predictions)
roc_auc = auc(fpr, tpr)
indices = sorted(np.random.choice(len(fpr), sample_vals-2, replace=False))
indices = [0]+indices
indices.append(len(fpr)-1)
self.roc_fpr_vals = fpr[indices]
self.roc_tpr_vals = tpr[indices]
self.roc_threshold_vals = thresholds[indices]
self.roc_auc = roc_auc
print(f' ROC AUC: {roc_auc:0.4f}')
    def discriminator_hist(self, neural_net, num_bins=50):
        """
        Histogram the test set discriminator outputs separately for true signal
        and true background events, storing the bin edges and bin counts.
        """
        labels_test = neural_net.labels_test()
labels_pred = neural_net.predict_test_data()
labels_pred_signal = labels_pred[np.array(labels_test, dtype=bool)]
labels_pred_background = labels_pred[np.invert(np.array(labels_test, dtype=bool))]
self.bins = np.linspace(0, 1, num_bins)
self.signal_bin_vals, _ = np.histogram(labels_pred_signal, self.bins)
self.background_bin_vals, _ = np.histogram(labels_pred_background, self.bins)
def calc_significance(self, dataset, num_thresholds=200, ZA=True):
"""
Calculate the significance plot.
Parameters
----------
dataset : pandas.core.frame.DataFrame
Dataframe of values needed to create significance plot.
num_thresholds : int, optional
Number of discriminator threshold values to calculate the
significance for. Number of points generated to plot.
The default is 200.
ZA : bool, optional
            Whether to use the full formula for the significance, or simply
            use the s/sqrt(b) approximation. The default is True.
Returns
-------
bin_centres_sig : list
List of threshold values. x-axis vals.
sig_vals : numpy.ndarray
Values for the significance.
"""
bin_centres_sig = []
bin_vals_sig = []
bin_centres_back = []
bin_vals_back = []
sig_vals = []
thresholds = np.linspace(0, 1, num_thresholds)
for i in range(len(thresholds)-1):
df_selection = dataset[dataset['labels_pred'].between(thresholds[i], 1)]
df_sig = df_selection[df_selection['event_labels'] == 1]
df_back = df_selection[df_selection['event_labels'] == 0]
sum_xs_weight_sig = df_sig['xs_weight'].sum()
sum_xs_weight_back = df_back['xs_weight'].sum()
if sum_xs_weight_sig <= 0 or sum_xs_weight_back <= 0:
continue
bin_centres_sig.append(thresholds[i])
bin_vals_sig.append(sum_xs_weight_sig)
bin_centres_back.append(thresholds[i])
bin_vals_back.append(sum_xs_weight_back)
s = sum_xs_weight_sig
b = sum_xs_weight_back
            if ZA:
                z = sqrt(2*((s+b)*np.log(1+(s/b))-s))  # Asimov median significance Z_A
sig_vals.append(z)
else:
sig_vals.append(s/(sqrt(b)))
sig_vals = np.asarray(sig_vals)
self.max_significance = bin_centres_sig[sig_vals.argmax()]
return bin_centres_sig, sig_vals
def multi_class_roc_curve(self, neural_net, sample_vals=500):
'''
Calculates the ROC curve for a multi signal classifier.
Parameters
----------
        neural_net : EventNN, JetRNN, CombinedNN
            Keras model class with a trained multi-class output.
        sample_vals : int, optional
            Number of ROC curve points kept per class (a random sample of the
            full curve). The default is 500.
Returns
-------
None.
'''
class_labels = list(neural_net.encoding_dict.keys())
self.fpr = dict()
self.tpr = dict()
self.roc_auc = dict()
labels_test = neural_net.multi_labels_test()
labels_pred = neural_net.predict_test_data()
for idx, lab in enumerate(class_labels):
fpr_full, tpr_full, _ = roc_curve(labels_test[:, idx], labels_pred[:, idx])
self.roc_auc[lab] = auc(fpr_full, tpr_full)
indices = sorted(np.random.choice(len(fpr_full), sample_vals, replace=False))
self.fpr[lab] = fpr_full[indices]
self.tpr[lab] = tpr_full[indices]
#Compute the micro-average ROC curve and ROC area for all classes
fpr_full, tpr_full, _ = roc_curve(labels_test.ravel(), labels_pred.ravel())
self.roc_auc["micro"] = auc(fpr_full, tpr_full)
indices = sorted(np.random.choice(len(fpr_full), sample_vals, replace=False))
self.fpr['micro'] = fpr_full[indices]
self.tpr['micro'] = tpr_full[indices]
def multi_class_confusion_matrix(self, neural_net):
self.encoding_dict = neural_net.encoding_dict
labels_test = neural_net.multi_labels_test()
labels_pred = neural_net.predict_test_data()
labels_pred = np.argmax(labels_pred, axis=1)
labels_test = np.argmax(labels_test, axis=1)
# Create confusion matrix
self.cm = confusion_matrix(labels_test, labels_pred)
def to_dict(self, floats_only=False):
"""
Return a dictionary of the class attributes.
Parameters
----------
floats_only : bool, optional
If floats_only is True, then only the attributes which are single
values will be returned. Else all attribute are returned.
The default is False.
Returns
-------
self_dict : dict
Dictionary representation of class attributes.
"""
self_dict = self.__dict__
if floats_only:
types = [float, int, np.float64, np.int32]
self_dict = {k: v for k, v in self_dict.items() if type(v) in types}
return self_dict
def to_dataframe(self):
"""
Return a pandas dataframe of the class attributes.
Returns
-------
results_df : pandas.DataFrame
Dataframe representation of class attributes.
"""
output_dict = self.to_dict()
results_df = pd.DataFrame([output_dict])
results_df = results_df.reindex(sorted(results_df.columns), axis=1)
index_col = results_df.pop('run_index')
results_df.insert(0, 'run_index', index_col)
return results_df
class ModelResultsMulti():
"""
This class stores the results from multiple runs of training the EventNN
model.
"""
def __init__(self):
self.df_results = pd.DataFrame()
def add_result(self, result, additional_results={}):
"""
Add a training result to the main dataframe.
Parameters
----------
result : ModelResults
ModelResults class object of training results.
additional_results : dict
Additional values to add to model_results dataframe in the form
of a dictionary. Used to add model arguments to results.
"""
df = result.to_dataframe()
for key, value in additional_results.items():
            try:
                df.insert(1, key, value)
            except ValueError:
                # column already present; keep the existing value
                pass
        self.df_results = pd.concat([self.df_results, df], axis=0, ignore_index=True)
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Preprocess ieee-fraud-detection dataset.
(https://www.kaggle.com/c/ieee-fraud-detection).
Train shape:(590540,394),identity(144233,41)--isFraud 3.5%
Test shape:(506691,393),identity(141907,41)
############### TF Version: 1.13.1/Python Version: 3.7 ###############
"""
import os
import gc
import random
import warnings
import datetime
import numpy as np
import pandas as pd
import lightgbm as lgb
from copy import deepcopy
from sklearn import metrics
from scipy.stats import ks_2samp
from bayes_opt import BayesianOptimization
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
warnings.filterwarnings('ignore')
# make all processes deterministic / fix the seeds of the random number generators
# os.environ maps the process environment; PYTHONHASHSEED is one of its variables.
# Python normally salts the hashes of str/bytes/datetime objects with a random seed;
# setting this variable to a number fixes that seed, making hash values reproducible.
def set_seed(seed=0):
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
def model_cv(tr_df, in_df, features_columns, target, params, nfold=5):
    """Stratified k-fold LightGBM training: returns fold-averaged predictions for in_df and per-fold validation frames."""
folds = StratifiedKFold(n_splits=nfold, shuffle=True, random_state=SEED)
train_x, train_y = tr_df[features_columns], tr_df[target]
infer_x, infer_y = in_df[features_columns], in_df[target]
va_df = []
in_df = in_df[["phone_no_m"]]
preds = np.zeros(len(in_df))
for fold_, (tra_idx, val_idx) in enumerate(folds.split(train_x, train_y)):
print("---------- Fold:", fold_)
tr_x, tr_y = train_x.iloc[tra_idx, :], train_y[tra_idx]
vl_x, vl_y = train_x.iloc[val_idx, :], train_y[val_idx]
print("---------- Train num:", len(tr_x), "Valid num:", len(vl_x))
tr_data = lgb.Dataset(tr_x, label=tr_y)
vl_data = lgb.Dataset(vl_x, label=vl_y)
vl_fold = deepcopy(tr_df.iloc[val_idx, :][["phone_no_m", target]])
estimator = lgb.train(params, tr_data, valid_sets=[tr_data, vl_data], verbose_eval=50)
valid_p = estimator.predict(vl_x)
infer_p = estimator.predict(infer_x)
preds += infer_p / nfold
vl_fold["pred"] = valid_p
va_df.append(vl_fold)
del tr_x, tr_y, vl_x, vl_y, tr_data, vl_data, vl_fold
gc.collect()
in_df["label"] = preds
return in_df, va_df
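# Illustrative usage sketch (not part of the original script): model_cv expects a
# labeled training frame and an inference frame that both carry the feature
# columns and a "phone_no_m" id column; frame names are placeholders.
# Note: model_cv uses the module-level SEED, which the original script sets in __main__.
def _example_model_cv(train_frame, infer_frame, feature_cols, params):
    preds_df, fold_valid = model_cv(train_frame, infer_frame, feature_cols, "label", params, nfold=5)
    oof_df = pd.concat(fold_valid)   # out-of-fold validation predictions for metric checks
    return preds_df, oof_df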
if __name__ == "__main__":
print("========== 1.Set random seed ...")
SEED = 32
set_seed(SEED)
print("========== 2.Load csv data ...")
TARGET = "label"
tra_path = os.getcwd() + "\\data_train.csv"
tes_path = os.getcwd() + "\\data_tests.csv"
train_df = pd.read_csv(tra_path, encoding="utf-8")
infer_df = pd.read_csv(tes_path, encoding="utf-8")
# Encode Str columns
for col in list(train_df):
if train_df[col].dtype == 'O':
print(col)
# train_df[col] = train_df[col].fillna("UNK")
# infer_df[col] = infer_df[col].fillna("UNK")
# train_df[col] = train_df[col].astype(str)
# infer_df[col] = infer_df[col].astype(str)
#
# le = LabelEncoder()
# le.fit(list(train_df[col]) + list(infer_df[col]))
# train_df[col] = le.transform(train_df[col])
# infer_df[col] = le.transform(infer_df[col])
train_df[col] = train_df[col].astype("category")
infer_df[col] = infer_df[col].astype("category")
# Model Features
rm_cols = ["phone_no_m", TARGET, ]
features_cols = [col for col in list(train_df) if col not in rm_cols]
# Model params
lgb_params = {
'objective': 'binary',
'boosting': 'gbdt',
'metric': 'auc',
'tree_learner': 'serial',
'seed': SEED,
'n_estimators': 300,
'learning_rate': 0.02,
'max_depth': 5,
'num_leaves': 24,
'min_data_in_leaf': 32,
'bagging_freq': 1,
'bagging_fraction': 0.75,
'feature_fraction': 0.7,
'lambda_l1': 0.01,
'lambda_l2': 0.01,
'min_gain_to_split': 5.0,
'max_bin': 255,
'verbose': -1,
'early_stopping_rounds': 100,
}
    # Single-feature ablation: drop one feature at a time and re-evaluate CV performance
Feature_Sel = 0
if Feature_Sel:
data_list = []
for i in range(len(features_cols)):
feat_eva = deepcopy(features_cols)
del feat_eva[i]
print(i, len(feat_eva))
_, valid_pred = model_cv(train_df, infer_df, feat_eva, TARGET, lgb_params, nfold=5)
valid_df = pd.concat(valid_pred)
fpr, tpr, _ = metrics.roc_curve(valid_df[TARGET], valid_df["pred"])
valid_auc = metrics.auc(fpr, tpr)
print("\n===== OOF Valid AUC: ", valid_auc)
valid_df["pred"] = valid_df["pred"].map(lambda x: 1 if x >= 0.3 else 0)
valid_f1 = metrics.f1_score(valid_df[TARGET], valid_df["pred"], average="macro")
print("\n===== OOF Valid F1-Score: ", valid_f1)
data_list.append([i, features_cols[i], valid_auc, valid_f1])
data_name = ["idx", "feature", "OOF_AUC", "OOF_F1"]
data_res = pd.DataFrame(columns=data_name, data=data_list)
data_etl = data_res.sort_values(by=["OOF_AUC", "OOF_F1"], axis=0, ascending=False)
data_etl.round({"OOF_AUC": 3, "OOF_F1": 3}).to_csv("feat_eva.csv", sep=",", index=False, header=True)
    # Model training
TRAIN_CV = 1
TRAIN_IF = 1
if TRAIN_CV:
print("===== Used feature len:", len(features_cols))
print("===== Used feature list:", features_cols)
infer_pred, valid_pred = model_cv(train_df, infer_df, features_cols, TARGET, lgb_params, nfold=5)
valid_df = pd.concat(valid_pred)
fpr, tpr, _ = metrics.roc_curve(valid_df[TARGET], valid_df["pred"])
valid_auc = metrics.auc(fpr, tpr)
print("\n===== OOF Valid AUC: ", valid_auc)
valid_df["pred"] = valid_df["pred"].map(lambda x: 1 if x >= 0.3 else 0)
valid_f1 = metrics.f1_score(valid_df[TARGET], valid_df["pred"], average="macro")
print("\n===== OOF Valid F1-Score: ", valid_f1)
# Export
if TRAIN_IF:
infer_pred["label"] = infer_pred["label"].map(lambda x: 1 if x >= 0.3 else 0)
infer_pred[["phone_no_m", "label"]].to_csv("submit_0708.csv", index=False)
    # Bayesian hyperparameter optimization
Feature_Opt = 0
if Feature_Opt:
opt_lgb = {
"p1": (100, 400),
"p2": (0.01, 0.2),
"p3": (4, 6),
"p4": (8, 32),
"p5": (16, 64),
"p6": (0.5, 0.8),
"p7": (0.5, 0.8),
"p8": (0.01, 0.8),
"p9": (0.01, 0.8),
}
def opt_lgb_para(p1, p2, p3, p4, p5, p6, p7, p8, p9):
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
train_x, train_y = train_df[features_cols], train_df[TARGET]
opt_params = {
'objective': 'binary',
'boosting': 'gbdt',
'metric': 'auc',
'tree_learner': 'serial',
'seed': SEED,
'n_estimators': int(p1),
'learning_rate': round(p2, 2),
'max_depth': int(p3),
'num_leaves': int(p4),
'min_data_in_leaf': int(p5),
'bagging_freq': 1,
'bagging_fraction': round(p6, 2),
'feature_fraction': round(p7, 2),
'lambda_l1': round(p8, 2),
'lambda_l2': round(p9, 2),
'min_gain_to_split': 5.0,
'max_bin': 255,
'verbose': -1,
'early_stopping_rounds': 100,
}
va_df = []
for fold_, (tra_idx, val_idx) in enumerate(folds.split(train_x, train_y)):
print("-----Fold:", fold_)
tr_x, tr_y = train_x.iloc[tra_idx, :], train_y[tra_idx]
vl_x, vl_y = train_x.iloc[val_idx, :], train_y[val_idx]
print("-----Train num:", len(tr_x), "Valid num:", len(vl_x))
tr_data = lgb.Dataset(tr_x, label=tr_y)
vl_data = lgb.Dataset(vl_x, label=vl_y)
vl_fold = deepcopy(train_df.iloc[val_idx, :][[TARGET]])
estimator = lgb.train(opt_params, tr_data, valid_sets=[tr_data, vl_data], verbose_eval=50)
valid_p = estimator.predict(vl_x)
vl_fold["pred"] = valid_p
va_df.append(vl_fold)
del tr_x, tr_y, vl_x, vl_y, tr_data, vl_data, vl_fold
gc.collect()
            val_pred = pd.concat(va_df)
import logging
import pandas as pd
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from constants import (PLOTLY_TEMPLATE, PANDAS_TEMPLATE)
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.metrics import roc_auc_score, roc_curve
def baseline_trainer(processed_df, algorithm, cf, name=None):
logger = logging.getLogger(__name__)
id_col = ['customerID']
target_col = ["Churn"]
train, test = train_test_split(processed_df, test_size=.25, random_state=111)
cols = [i for i in processed_df.columns if i not in id_col + target_col]
train_X = train[cols]
train_Y = train[target_col]
test_X = test[cols]
test_Y = test[target_col]
logger.info('Building and Validating Model')
algorithm.fit(train_X, train_Y)
predictions = algorithm.predict(test_X)
probabilities = algorithm.predict_proba(test_X)
if cf == "coefficients":
coefficients = pd.DataFrame(algorithm.coef_.ravel())
elif cf == "features":
coefficients = pd.DataFrame(algorithm.feature_importances_)
column_df = pd.DataFrame(cols)
coef_sumry = (pd.merge(coefficients, column_df, left_index=True,
right_index=True, how="left"))
coef_sumry.columns = ["coefficients", "features"]
coef_sumry = coef_sumry.sort_values(by="coefficients", ascending=False)
print(f"\n Classification report : \n, {classification_report(test_Y, predictions)}")
print(f"Accuracy Score : {accuracy_score(test_Y, predictions)}\n")
conf_matrix = confusion_matrix(test_Y, predictions)
print(f'Confusion Matrix:\n{conf_matrix}')
model_roc_auc = roc_auc_score(test_Y, predictions)
print(f"Area under curve :\n{model_roc_auc} \n")
fpr, tpr, thresholds = roc_curve(test_Y, probabilities[:, 1])
metrics = f'''
Classification Report:\n{classification_report(test_Y, predictions)}\n,
AUC Score: {model_roc_auc}\n,
Confusion Matrix:\n{conf_matrix}\n
'''
f = open(f'../../models/baseline_{name}_metrics.txt', 'w')
f.write(metrics)
f.close()
logger.info('Producing Evaluation Report')
trace1 = go.Heatmap(z=conf_matrix,
x=["Not Churn", "Churn"],
y=["Not Churn", "Churn"],
showscale=False,
colorscale="Picnic",
name="matrix")
# plot roc curve
trace2 = go.Scatter(x=fpr, y=tpr,
name="Roc : " + str(model_roc_auc),
line=dict(color='rgb(22, 96, 167)', width=2))
trace3 = go.Scatter(x=[0, 1], y=[0, 1],
line=dict(color='rgb(205, 12, 24)', width=2,
dash='dot'))
# plot coeffs
trace4 = go.Bar(x=coef_sumry["features"], y=coef_sumry["coefficients"],
name="coefficients",
marker=dict(color=coef_sumry["coefficients"],
colorscale="Picnic",
line=dict(width=.6, color="black")))
# subplots
fig = make_subplots(rows=2, cols=2, specs=[[{}, {}], [{'colspan': 2}, None]],
subplot_titles=('Confusion Matrix',
'Receiver operating characteristic',
'Feature Importances'))
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
fig.append_trace(trace3, 1, 2)
fig.append_trace(trace4, 2, 1)
fig['layout'].update(showlegend=False, title=f"{name} performance",
autosize=False, height=900, width=800,
plot_bgcolor='rgba(240,240,240, 0.95)',
paper_bgcolor='rgba(240,240,240, 0.95)',
margin=dict(b=195))
fig["layout"]["xaxis2"].update(dict(title="false positive rate"))
fig["layout"]["yaxis2"].update(dict(title="true positive rate"))
fig["layout"]["xaxis3"].update(dict(showgrid=True, tickfont=dict(size=10),
tickangle=90))
fig.layout['hovermode'] = 'x'
fig.show()
return algorithm
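# Illustrative usage sketch (not part of the original module): train a logistic
# regression baseline on a preprocessed churn frame. The import, file path and
# model name are assumptions for demonstration only.
def _example_baseline_trainer():
    from sklearn.linear_model import LogisticRegression
    processed_df = pd.read_csv('../../data/processed/churn_processed.csv')  # placeholder path
    logit = LogisticRegression(max_iter=1000)
    return baseline_trainer(processed_df, logit, cf="coefficients", name="logistic_regression")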
def create_report(algorithm, test_X, test_Y):
predictions = algorithm.predict(test_X)
probabilities = algorithm.predict_proba(test_X)
model_roc_auc = roc_auc_score(test_Y, predictions)
metrics = f'''
Classification Report:\n{classification_report(test_Y, predictions)}\n,
Accuracy Score : {accuracy_score(test_Y, predictions)}\n"
AUC Score: {model_roc_auc}\n,
Confusion Matrix:\n{confusion_matrix(test_Y, predictions)}\n
'''
return metrics
def plot_report(processed_df, algorithm, test_X, test_Y, cf, name=None):
id_col = ['customerID']
target_col = ["Churn"]
cols = [i for i in processed_df.columns if i not in id_col + target_col]
if cf == "coefficients":
coefficients = pd.DataFrame(algorithm.coef_.ravel())
elif cf == "features":
coefficients = pd.DataFrame(algorithm.feature_importances_)
    column_df = pd.DataFrame(cols)
from LIMBR import simulations
import pandas as pd
sims = {}
for i in range(1,21):
analysis = simulations.analyze('twenty_miss_5_NN_' + str(i) + '_true_classes.txt')
analysis.add_data('twenty_miss_5_NN_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR', include_missing=True)
analysis.calculate_auc()
sims[i] = analysis.roc_auc
data = pd.DataFrame(sims).T
data['Params'] = 'twenty_miss_5_NN'
sims = {}
for i in range(1,21):
analysis = simulations.analyze('twenty_miss_10_NN_' + str(i) + '_true_classes.txt')
analysis.add_data('twenty_miss_10_NN_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR', include_missing=True)
analysis.calculate_auc()
sims[i] = analysis.roc_auc
temp_data = pd.DataFrame(sims).T
temp_data['Params'] = 'twenty_miss_10_NN'
data = pd.concat([data, temp_data])
for i in range(1,21):
analysis = simulations.analyze('twenty_miss_15_NN_' + str(i) + '_true_classes.txt')
analysis.add_data('twenty_miss_15_NN_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR', include_missing=True)
analysis.calculate_auc()
sims[i] = analysis.roc_auc
temp_data = pd.DataFrame(sims).T
temp_data['Params'] = 'twenty_miss_15_NN'
data = pd.concat([data, temp_data])
for i in range(1,21):
analysis = simulations.analyze('thirty_miss_5_NN_' + str(i) + '_true_classes.txt')
analysis.add_data('thirty_miss_5_NN_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR', include_missing=True)
analysis.calculate_auc()
sims[i] = analysis.roc_auc
temp_data = pd.DataFrame(sims).T
temp_data['Params'] = 'thirty_miss_5_NN'
data = pd.concat([data, temp_data])
for i in range(1,21):
analysis = simulations.analyze('thirty_miss_10_NN_' + str(i) + '_true_classes.txt')
analysis.add_data('thirty_miss_10_NN_' + str(i) + '_LIMBR_processed__jtkout_GammaP.txt','LIMBR', include_missing=True)
analysis.calculate_auc()
sims[i] = analysis.roc_auc
temp_data = pd.DataFrame(sims).T
temp_data['Params'] = 'thirty_miss_10_NN'
data = pd.concat([data, temp_data])
import s3fs
import numpy as np
import pandas as pd
import xarray as xr
from glob import glob
from os.path import join, exists
from sklearn.preprocessing import StandardScaler, RobustScaler, MaxAbsScaler, MinMaxScaler
from operator import lt, le, eq, ne, ge, gt
scalers = {"MinMaxScaler": MinMaxScaler,
"MaxAbsScaler": MaxAbsScaler,
"StandardScaler": StandardScaler,
"RobustScaler": RobustScaler}
ops = {"<": lt, "<=": le, "==": eq, "!=": ne, ">=": ge, ">": gt}
def log10_transform(x, eps=1e-18):
return np.log10(np.maximum(x, eps))
def neg_log10_transform(x, eps=1e-18):
return np.log10(np.maximum(-x, eps))
def zero_transform(x, eps=None):
return np.zeros(x.shape, dtype=np.float32)
def inverse_log10_transform(x):
return 10.0 ** x
def inverse_neg_log10_transform(x):
return -10.0 ** x
transforms = {"log10_transform": log10_transform,
"neg_log10_transform": neg_log10_transform,
"zero_transform": zero_transform}
inverse_transforms = {"log10_transform": inverse_log10_transform,
"neg_log10_transform": inverse_neg_log10_transform,
"zero_transform": zero_transform}
def load_cam_output(path, file_start="TAU_run1.cam.h1", file_end="nc"):
"""
Load set of model output from CAM/CESM into xarray Dataset object.
Args:
path: Path to directory containing model output
file_start: Shared beginning of model files
file_end: Filetype shared by all files.
Returns:
xarray Dataset object containing the model output
"""
if not exists(path):
raise FileNotFoundError("Specified path " + path + " does not exist")
data_files = sorted(glob(join(path, file_start + "*" + file_end)))
if len(data_files) > 0:
cam_dataset = xr.open_mfdataset(data_files, decode_times=False)
else:
raise FileNotFoundError("No matching CAM output files found in " + path)
return cam_dataset
def get_cam_output_times(path, time_var="time", file_start="TAU_run1.cam.h1", file_end="nc"):
if not exists(path):
raise FileNotFoundError("Specified path " + path + " does not exist")
data_files = sorted(glob(join(path, file_start + "*" + file_end)))
file_time_list = []
for data_file in data_files:
ds = xr.open_dataset(data_file, decode_times=False, decode_cf=False)
time_minutes = (ds[time_var].values * 24 * 60).astype(int)
file_time_list.append(pd.DataFrame({"time": time_minutes,
"filename": [data_file] * len(time_minutes)}))
ds.close()
del ds
return pd.concat(file_time_list, ignore_index=True)
def unstagger_vertical(dataset, variable, vertical_dim="lev"):
"""
Interpolate a 4D variable on a staggered vertical grid to an unstaggered vertical grid. Will not execute
until compute() is called on the result of the function.
Args:
dataset: xarray Dataset object containing the variable to be interpolated
variable: Name of the variable being interpolated
vertical_dim: Name of the vertical coordinate dimension.
Returns:
xarray DataArray containing the vertically interpolated data
"""
var_data = dataset[variable]
unstaggered_var_data = xr.DataArray(0.5 * (var_data[:, :-1].values + var_data[:, 1:].values),
coords=[var_data.time, dataset[vertical_dim], var_data.lat, var_data.lon],
dims=("time", vertical_dim, "lat", "lon"),
name=variable + "_" + vertical_dim)
return unstaggered_var_data
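# Numeric sketch of the unstaggering rule used above: N + 1 values on staggered interfaces
# are averaged pairwise onto N unstaggered levels. The small array is illustrative only.
def _example_unstagger_rule():
    staggered = np.array([0.0, 2.0, 4.0, 6.0])        # 4 interface values
    return 0.5 * (staggered[:-1] + staggered[1:])      # -> array([1., 3., 5.])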
def split_staggered_variable(dataset, variable, vertical_dim="lev"):
"""
Split vertically staggered variable into top and bottom subsets with the unstaggered
vertical coordinate
Args:
dataset: xarray Dataset object
variable: Name of staggered variable
vertical_dim: Unstaggered vertical dimension
Returns:
top_var_data, bottom_var_data: xarray DataArrays containing the unstaggered vertical data
"""
var_data = dataset[variable]
top_var_data = xr.DataArray(var_data[:, :-1], coords=[var_data.time,
dataset[vertical_dim],
var_data["lat"],
var_data["lon"]],
dims=("time", vertical_dim, "lat", "lon"),
name=variable + "_top")
bottom_var_data = xr.DataArray(var_data[:, 1:], coords=[var_data.time,
dataset[vertical_dim],
var_data["lat"],
var_data["lon"]],
dims=("time", vertical_dim, "lat", "lon"),
name=variable + "_bottom")
return xr.Dataset({variable + "_top": top_var_data, variable + "_bottom": bottom_var_data})
def add_index_coords(dataset, row_coord="lat", col_coord="lon", depth_coord="lev"):
"""
Calculate the index values of the row, column, and depth coordinates in a Dataset.
Indices range from 0 to length of coordinate - 1.
Args:
dataset: xarray Dataset
row_coord: name of the row coordinate variable. Default lat.
col_coord: name of the column coordinate variable. Default lon.
depth_coord: name of the depth coordinate variable. Default lev.
Returns:
row, col, depth: DataArrays with the row, col, and depth indices
"""
row = xr.DataArray(np.arange(dataset[row_coord].shape[0]), dims=(row_coord,), name="row")
col = xr.DataArray(np.arange(dataset[col_coord].shape[0]), dims=(col_coord,), name="col")
depth = xr.DataArray(np.arange(dataset[depth_coord].shape[0]), dims=(depth_coord,), name="depth")
return xr.Dataset({"row": row, "col": col, "depth": depth})
def calc_pressure_field(dataset, pressure_var_name="pressure"):
"""
Calculate pressure at each location based on the surface pressure and vertical coordinate
information.
Args:
        dataset: xarray Dataset containing the hybrid coefficients hyam and hybm, the reference pressure P0, and the surface pressure PS.
        pressure_var_name: name given to the derived pressure DataArray.
    Returns:
        xarray DataArray of atmospheric pressure in Pa on the model levels.
"""
pressure = xr.DataArray((dataset["hyam"] * dataset["P0"] + dataset["hybm"] * dataset["PS"]).transpose("time", "lev", "lat", "lon"))
pressure.name = pressure_var_name
pressure.attrs["units"] = "Pa"
pressure.attrs["long_name"] = "atmospheric pressure"
return pressure
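# Scalar sketch of the hybrid sigma-pressure formula evaluated above,
# p = hyam * P0 + hybm * PS; the coefficient and pressure values are illustrative, not CAM's.
def _example_hybrid_pressure():
    hyam, hybm = 0.01, 0.95          # hypothetical hybrid coefficients at one level
    p0, ps = 100000.0, 98500.0       # reference and surface pressure, Pa
    return hyam * p0 + hybm * ps     # -> 94575.0 Pa at that level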
def calc_temperature(dataset, density_variable="RHO_CLUBB_lev", pressure_variable="pressure"):
"""
    Calculate temperature from pressure and density using the ideal gas law. The derived
    temperature DataArray is returned rather than added to the dataset in place.
Args:
dataset: xarray Dataset object containing pressure and density variable
density_variable: name of the density variable
pressure_variable: name of the pressure variable
"""
temperature = dataset[pressure_variable] / dataset[density_variable] / 287.0
temperature.attrs["units"] = "K"
temperature.attrs["long_name"] = "temperature derived from pressure and density"
temperature.name = "temperature"
return temperature
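# Scalar sketch of the ideal-gas relation used above, T = p / (rho * R_d) with
# R_d = 287 J kg^-1 K^-1; the pressure and density values are illustrative.
def _example_temperature_from_state():
    pressure = 101325.0                 # Pa
    density = 1.2                       # kg m^-3
    return pressure / density / 287.0   # roughly 294 K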
def convert_to_dataframe(dataset, variables, times, time_var,
subset_variable, subset_threshold):
"""
Convert 4D Dataset to flat dataframe for machine learning.
Args:
dataset: xarray Dataset containing all relevant variables and times.
variables: List of variables in dataset to be included in DataFrame. All variables should have the same
dimensions and coordinates.
times: Iterable of times to select from dataset.
time_var: Variable used as the time coordinate.
subset_variable: Variable used to select a subset of grid points from file
subset_threshold: Threshold that must be exceeded for examples to be kept.
    Returns:
        pandas DataFrame with one row per selected grid point and time.
"""
data_frames = []
for t, time in enumerate(times):
print(t, time)
time_df = dataset[variables].sel(**{time_var: time}).to_dataframe()
        if isinstance(subset_variable, list):
            valid = np.zeros(time_df.shape[0], dtype=bool)
            for s, sv in enumerate(subset_variable):
                valid[time_df[sv] >= subset_threshold[s]] = True
else:
valid = time_df[subset_variable] >= subset_threshold
data_frames.append(time_df.loc[valid].reset_index())
print(data_frames[-1])
del time_df
return pd.concat(data_frames)
def load_csv_data(csv_path, index_col="Index"):
"""
Read pre-processed csv files into memory.
Args:
csv_path: Path to csv files
index_col: Column label used as the index
Returns:
`pandas.DataFrame` containing data from all csv files in the csv_path directory.
"""
csv_files = sorted(glob(join(csv_path, "*.csv")))
all_data = []
for csv_file in csv_files:
all_data.append(pd.read_csv(csv_file, index_col=index_col))
return pd.concat(all_data, axis=0)
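# Usage sketch for the loader above; the directory path is an assumption and must contain
# at least one csv file with an "Index" column for the call to succeed.
def _example_load_csvs(csv_dir="/tmp/microphysics_csv"):
    frame = load_csv_data(csv_dir, index_col="Index")
    return frame.shape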
def subset_data_files_by_date(data_path,
train_date_start=0, train_date_end=8000,
test_date_start=9000,
test_date_end=18000, validation_frequency=3):
"""
For a large set of csv files, this sorts the files into training, validation and testing data.
This way the full dataset does not have to be loaded and then broken into pieces.
Args:
        data_path: directory of csv files, or the "ncar-aiml-data-commons/microphysics" S3 prefix for parquet files.
        train_date_start: first time stamp (from the file names) included in the training period.
        train_date_end: last time stamp included in the training period.
        test_date_start: first time stamp included in the testing period.
        test_date_end: last time stamp included in the testing period.
        validation_frequency: every validation_frequency-th file from the training period is held out for validation.
    Returns:
        train_files, val_files, test_files: pandas Series of file paths for each split.
"""
if train_date_start > train_date_end:
raise ValueError("train_date_start should not be greater than train_date_end")
if test_date_start > test_date_end:
raise ValueError("test_date_start should not be greater than test_date_end")
if train_date_end > test_date_start:
raise ValueError("train and test date periods overlap.")
if data_path == "ncar-aiml-data-commons/microphysics":
fs = s3fs.S3FileSystem(anon=True)
csv_files = pd.Series(sorted(fs.ls("ncar-aiml-data-commons/microphysics")))
data_end = "*.parquet"
else:
data_end = "*.csv"
csv_files = pd.Series(sorted(glob(join(data_path, "*" + data_end))))
file_times = csv_files.str.split("/").str[-1].str.split("_").str[-1].str.strip(data_end).astype(int).values
print("File times:\n",file_times)
train_val_ind = np.where((file_times >= train_date_start) & (file_times <= train_date_end))[0]
test_ind = np.where((file_times >= test_date_start) & (file_times <= test_date_end))[0]
val_ind = train_val_ind[::validation_frequency]
train_ind = np.isin(train_val_ind, val_ind, invert=True)
train_files = csv_files.loc[train_val_ind[train_ind]]
val_files = csv_files.loc[val_ind]
test_files = csv_files.loc[test_ind]
return train_files, val_files, test_files
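# Usage sketch pointing at the public S3 prefix named in the function above; the date
# bounds simply repeat the defaults and anonymous S3 access is assumed to be available.
def _example_split_files():
    return subset_data_files_by_date("ncar-aiml-data-commons/microphysics",
                                     train_date_start=0, train_date_end=8000,
                                     test_date_start=9000, test_date_end=18000,
                                     validation_frequency=3)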
def subset_data_by_date(data, train_date_start=0, train_date_end=1, test_date_start=2, test_date_end=3,
validation_frequency=3, subset_col="time"):
"""
Subset temporal data into training, validation, and test sets by the date column.
Args:
data: pandas DataFrame containing all data for training, validation, and testing.
train_date_start: First date included in training period
train_date_end: Last date included in training period
test_date_start: First date included in testing period
test_date_end: Last date included in testing period.
validation_frequency: How often days are separated from training dataset for validation.
Should be an integer > 1. 2 is every other day, 3 is every third day, etc.
subset_col: Name of column being used for date evaluation.
Returns:
training_set, validation_set, test_set
"""
if train_date_start > train_date_end:
raise ValueError("train_date_start should not be greater than train_date_end")
if test_date_start > test_date_end:
raise ValueError("test_date_start should not be greater than test_date_end")
if train_date_end > test_date_start:
raise ValueError("train and test date periods overlap.")
train_indices = (data[subset_col] >= train_date_start) & (data[subset_col] <= train_date_end)
test_indices = (data[subset_col] >= test_date_start) & (data[subset_col] <= test_date_end)
train_and_validation_data = data.loc[train_indices]
test_data = data.loc[test_indices]
train_and_validation_dates = np.unique(train_and_validation_data[subset_col].values)
validation_dates = train_and_validation_dates[validation_frequency::validation_frequency]
train_dates = train_and_validation_dates[np.isin(train_and_validation_dates,
validation_dates,
assume_unique=True,
invert=True)]
train_data = data.loc[np.isin(data[subset_col].values, train_dates)]
validation_data = data.loc[np.isin(data[subset_col].values, validation_dates)]
return train_data, validation_data, test_data
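# Usage sketch for the date-based split above on a toy frame; the column values and date
# bounds are assumptions chosen only to exercise the call signature.
def _example_subset_by_date():
    toy = pd.DataFrame({"time": np.repeat(np.arange(10), 2),
                        "value": np.random.randn(20)})
    return subset_data_by_date(toy, train_date_start=0, train_date_end=5,
                               test_date_start=6, test_date_end=9,
                               validation_frequency=3, subset_col="time")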
def categorize_output_values(output_values, output_transforms, output_scalers=None):
"""
Transform and rescale the values of a single output variable based on specified transforms and scaling functions.
The microphysics tendencies can be either 0 or nonzero and tend to have exponential distributions. Machine
learning models tend to perform better when the data are more Gaussian. To account for this, we perform
log transforms on the nonzero values and then normalize them. The rain water number concentration (Nr)
tendencies have both positive and negative exponential tendencies, so we have separate models for each side
and separate transformations and normalizations.
Currently, we have support for the following transforms:
* log10_transform: applies np.log10
* neg_log10_transform: applies np.log10 to the additive inverse of the inputs
* zero_transform: returns an array the same shape as the input but with all 0s
We support the following scalers:
    * StandardScaler: removes the mean and scales to unit variance
    * MinMaxScaler: rescales values to range between 0 and 1
    * MaxAbsScaler: scales values by the maximum absolute value so they fall within [-1, 1]
    * RobustScaler: scales using statistics that are robust to outliers (median and interquartile range)
Args:
output_values: numpy.ndarray of shape (samples, 1) with the raw output values from the microphysics
output_transforms: dictionary with keys indicating sign of tendency and values containing a 3-element list
[comparison operator, splitting threshold, transform type].
        output_scalers: optional dictionary of already-fit scaler objects keyed by label; if None, new scalers are fit.
    Returns:
        labels, transformed_outputs, scaled_outputs, output_scalers
"""
labels = np.zeros(output_values.shape, dtype=int)
transformed_outputs = np.zeros(output_values.shape)
scaled_outputs = np.zeros(output_values.shape)
if output_scalers is None:
output_scalers = {}
for label, comparison in output_transforms.items():
class_indices = ops[comparison[0]](output_values, float(comparison[1]))
labels[class_indices] = label
if comparison[2] != "None":
transformed_outputs[class_indices] = transforms[comparison[2]](output_values[class_indices],
eps=float(comparison[1]))
else:
transformed_outputs[class_indices] = output_values[class_indices]
# If the transform is 'None', then don't transform, copy exactly from the original data
if comparison[3] != "None":
if label not in list(output_scalers.keys()):
output_scalers[label] = scalers[comparison[3]]()
scaled_outputs[class_indices] = output_scalers[label].fit_transform(
transformed_outputs[class_indices].reshape(-1, 1)).ravel()
else:
scaled_outputs[class_indices] = output_scalers[label].transform(
transformed_outputs[class_indices].reshape(-1, 1)).ravel()
else:
output_scalers[label] = None
scaled_outputs[class_indices] = transformed_outputs[class_indices]
            # If the scaler is None, copy the transformed data through unchanged (it should not be zeroed)
return labels, transformed_outputs, scaled_outputs, output_scalers
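# Hedged usage sketch for the labelling helper above. The two-class spec below (integer
# labels mapped to [operator, threshold, transform name, scaler name]) is an assumed
# example of the expected 4-element format, not the project's real configuration.
def _example_categorize():
    values = np.array([[0.0], [1e-6], [3e-5], [0.0], [8e-7]])
    spec = {0: ["==", 0, "zero_transform", "None"],
            1: [">", 0, "log10_transform", "StandardScaler"]}
    labels, transformed, scaled, fit_scalers = categorize_output_values(values, spec)
    return labels.ravel(), scaled.ravel()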
def open_data_file(filename):
if "ncar-aiml-data-commons/microphysics" in filename:
fs = s3fs.S3FileSystem(anon=True)
fobj = fs.open(filename)
ds = pd.read_parquet(fobj).set_index('Index')
return ds
else:
ds = | pd.read_csv(filename, index_col="Index") | pandas.read_csv |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
        # unable to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in | range(20) | pandas.compat.range |