| prompt (string, length 19 to 1.03M) | completion (string, length 4 to 2.12k) | api (string, length 8 to 90) |
|---|---|---|
from nltk.corpus import stopwords
import re
import pandas as pd
import numpy as np
def get_features(text, index):
stop_words = set(stopwords.words('russian'))
txt = text.fillna('').str.lower()
features = {'punct': [],
'string_len': [],
'count_nums':[],
'number_words':[],
'stop_words':[],
'not_stop_words':[],
'larger_than_three':[],
'exclamation': []}
for tx in txt:
tx_p = remove_punct(tx)
features['punct'].append(get_punct(tx))
features['string_len'].append(len(tx))
features['count_nums'].append(get_num(tx))
features['number_words'].append(len(tx_p.split()))
features['stop_words'].append(get_stop_words(tx_p, stop_words))
features['not_stop_words'].append(not_stop_words(tx_p, stop_words))
features['larger_than_three'].append(words_larger_three(tx_p))
features['exclamation'].append(exclam(tx))
return pd.DataFrame(features, index=index, dtype=float)
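# The helpers called above (remove_punct, get_punct, get_num, get_stop_words, not_stop_words,
# words_larger_three, exclam) are not included in this snippet. Two minimal sketches of what
# they might look like, offered as assumptions rather than the original implementations:
def remove_punct(text):
    # Drop every character that is not a word character or whitespace.
    return re.sub(r'[^\w\s]', '', text)
def get_stop_words(text, stop_words):
    # Count how many tokens are stop words.
    return sum(1 for word in text.split() if word in stop_words)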
import pandas as pd
def to_date(object):
try:
date = pd.to_datetime(object).date()
if pd.isnull(date):
date = None
except:
date = None
return date
def series_to_list(object):
return object.where(pd.notnull(object))
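# Illustrative calls to to_date (example values, not from the original code):
#   to_date('2021-06-01')   -> datetime.date(2021, 6, 1)
#   to_date('not a date')   -> None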
from flask import Flask
from flask_restful import reqparse,Api,Resource
import pandas as pd
import re
#from json import dumps
#nltk.download('stopwords')
from nltk.stem.porter import PorterStemmer
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score
from sklearn.feature_extraction.text import CountVectorizer
parser=reqparse.RequestParser()
parser.add_argument('query')
class PredictSentiment(Resource):
def get(self):
dataset1 = pd.read_csv('/home/shrinidhikr/Downloads/fwdlocalhackdsay/train.csv', encoding='cp437')
import argparse
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def get_args():
"""
Get arguments from command line with argparse.
"""
parser = argparse.ArgumentParser(
prog='RMA-class-join.py',
description="""Summarize read assignments obtained from MEGAN RMA files.""")
parser.add_argument("-c", "--counts",
required=True,
nargs='+',
help="The complete set of read count files.")
parser.add_argument("-s", "--summaries",
required=True,
nargs='+',
help="The complete set of absolute summary count files.")
parser.add_argument("-r", "--rcfile",
required=True,
help="The read count summary for the fasta file(s).")
parser.add_argument("-o", "--outname",
required=True,
help="Name of output file.")
parser.add_argument("-p", "--outdir",
required=True,
help="Output directory path to write output files.")
return parser.parse_args()
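# A hypothetical invocation of this script; the file names are assumptions that follow the
# "sample.DATABASE.*" naming that the parsing below relies on:
#   python RMA-class-join.py -c sampleA.EC.counts.txt sampleA.NCBI.counts.txt \
#       -s sampleA.EC.summary.txt sampleA.NCBI.summary.txt \
#       -r read_counts.txt -o combined_summary.txt -p output/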
def make_df(f):
"""
Read the file into a dataframe using 'read' and 'annotation' as column headers, return the dataframe.
"""
df = pd.read_csv(f, sep = '\t', names = ['read', 'annotation'], header = None)
return df
def summarize_read_counts(df):
count_dist = sorted(df['read'].value_counts().values)
avg_annotation_per_read = round(sum(count_dist)/len(count_dist), 1)
total_unique_reads = len(count_dist)
return count_dist, avg_annotation_per_read, total_unique_reads
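def _demo_summarize_read_counts():
    # Tiny self-check of summarize_read_counts on made-up data (illustrative only).
    toy = pd.DataFrame({'read': ['r1', 'r1', 'r2'], 'annotation': ['a', 'b', 'c']})
    return summarize_read_counts(toy)  # -> ([1, 2], 1.5, 2)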
def make_annotations_dict(counts):
annotations_dict = {}
samplereads_dict = {"Combined_Functional":{}, "Combined_Taxonomic":{}}
functional_dbs = ['EC', 'EGGNOG', 'INTERPRO2GO', 'SEED']
taxonomic_dbs = ['GTDB', 'NCBI', 'NCBIbac']
for f in counts:
database = f.split('/')[-1].split('.')[1]
if database not in annotations_dict:
annotations_dict[database] = {}
df = make_df(f)
count_dist, avg_annotation_per_read, total_unique_reads = summarize_read_counts(df)
sample = f.split('/')[-1].split('.')[0]
if sample not in samplereads_dict["Combined_Functional"]:
samplereads_dict["Combined_Functional"][sample] = set()
if sample not in samplereads_dict["Combined_Taxonomic"]:
samplereads_dict["Combined_Taxonomic"][sample] = set()
if database in functional_dbs:
samplereads_dict["Combined_Functional"][sample].update(df['read'].unique())
elif database in taxonomic_dbs:
samplereads_dict["Combined_Taxonomic"][sample].update(df['read'].unique())
annotations_dict[database][sample] = {'count_dist':count_dist,
'avg_annotation_per_read':avg_annotation_per_read,
'total_unique_reads':total_unique_reads}
for db, sample_readsets in samplereads_dict.items():
annotations_dict[db] = {}
for sample, readset in sample_readsets.items():
annotations_dict[db][sample] = {'total_unique_reads':len(readset),
'count_dist':0,
'avg_annotation_per_read':0,
'classes':0}
return annotations_dict
def add_summary_info(annotations_dict, samples, summaries):
for f in summaries:
database = f.split('/')[-1].split('.')[1]
df = pd.read_csv(f, sep='\t')
# Dec 21: modified to optionally output the original counts
##
#---------------------------------------------------------------------
# SERVER only: input all files (.bam and .fa), output the MeH matrix as .csv
# Oct 19, 2021 ML after imputation test
# github
#---------------------------------------------------------------------
import random
import math
import os
import pysam
import csv
import sys
import pandas as pd
import numpy as np
import datetime
import time as t
from collections import Counter, defaultdict, OrderedDict
#---------------------------------------
# Functions definition
#---------------------------------------
def open_log(fname):
open_log.logfile = open(fname, 'w', 1)
def logm(message):
log_message = "[%s] %s\n" % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), message)
print(log_message, end='')
open_log.logfile.write(log_message)
def close_log():
open_log.logfile.close()
# Check whether a window has enough reads for complete/impute
def enough_reads(window,w,complete):
temp=np.isnan(window).sum(axis=1)==0
if complete: # For heterogeneity estimation
return temp.sum()>=3
else: # for imputation
tempw1=np.isnan(window).sum(axis=1)==1
#return temp.sum()>=2**(w-2) and tempw1.sum()>0
return temp.sum()>=2 and tempw1.sum()>0
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
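def _demo_impute():
    # Toy 4-read x 3-CpG window with a single missing call, to illustrate impute() above
    # (made-up values, not real methylation data).
    win = np.array([[1.0, 0.0, 1.0],
                    [1.0, 0.0, 1.0],
                    [0.0, 1.0, 0.0],
                    [1.0, 0.0, np.nan]])
    return impute(win, 3)  # the NaN is filled from a matching complete read (here 1.0)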
def outwindow(pat,patori,pos,chrom,w,M,UM,Mo,UMo,mC=4,strand='f',optional=False):
# get complete reads
tempori=np.isnan(patori).sum(axis=1)==0
patori=patori[np.where(tempori)[0],:]
countori=np.zeros((2**w,1))
temp=np.isnan(pat).sum(axis=1)==0
pat=pat[np.where(temp)[0],:]
count=np.zeros((2**w,1))
# m=np.shape(pat)[0]
pat=np.array(pat)
if w==2:
pat = Counter([str(i[0])+str(i[1]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00','10','01','11']])
if optional:
patori = Counter([str(i[0])+str(i[1]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['00','10','01','11']])
if w==3:
pat = Counter([str(i[0])+str(i[1])+str(i[2]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000','100','010','110','001','101','011','111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['000','100','010','110','001','101','011','111']])
if w==4:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3]) for i in patori.astype(int).tolist()])
countori=np.array([float(patori[i]) for i in ['0000','1000','0100','1100','0010','1010','0110','1110','0001',\
'1001','0101','1101','0011','1011','0111','1111']])
if w==5:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4]) for i in patori.astype(int).tolist()])
countori = np.array([float(patori[i]) for i in ['00000','10000','01000','11000','00100','10100','01100','11100','00010',\
'10010','01010','11010','00110','10110','01110','11110','00001','10001','01001','11001','00101',\
'10101','01101','11101','00011','10011','01011','11011','00111','10111','01111','11111']])
if w==6:
pat = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in pat.astype(int).tolist()])
count=np.array([float(pat[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
if optional:
patori = Counter([str(i[0])+str(i[1])+str(i[2])+str(i[3])+str(i[4])+str(i[5]) for i in patori.astype(int).tolist()])
countori = np.array([float(patori[i]) for i in ['000000','100000','010000','110000','001000','101000','011000','111000','000100',\
'100100','010100','110100','001100','101100','011100','111100','000010','100010','010010','110010','001010',\
'101010','011010','111010','000110', '100110','010110','110110','001110','101110','011110','111110',\
'000001','100001','010001','110001','001001','101001','011001','111001','000101',\
'100101','010101','110101','001101','101101','011101','111101','000011','100011','010011','110011','001011',\
'101011','011011','111011','000111', '100111','010111','110111','001111','101111','011111','111111']])
count=count.reshape(2**w)
count=np.concatenate((count[[0]],count))
countori=countori.reshape(2**w)
countori=np.concatenate((countori[[0]],countori))
if w==3 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==3 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==4 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==4 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==5 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'M':M,'UM':UM,'strand':strand}, index=[0])
if w==5 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p01o':countori[1],'p02o':countori[2],'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'p17o':countori[17],'p18o':countori[18],'p19o':countori[19],'p20o':countori[20],\
'p21o':countori[21],'p22o':countori[22],'p23o':countori[23],'p24o':countori[24],'p25o':countori[25],\
'p26o':countori[26],'p27o':countori[27],'p28o':countori[28],'p29o':countori[29],'p30o':countori[30],\
'p31o':countori[31],'p32o':countori[32],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,'strand':strand}, index=[0])
if w==6 and not optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'M':M,'UM':UM,\
'strand':strand}, index=[0])
if w==6 and optional:
opt=pd.DataFrame({'chrom':chrom,'pos':pos,'p01':count[1],'p02':count[2],'p03':count[3],'p04':count[4],\
'p05':count[5],'p06':count[6],'p07':count[7],'p08':count[8],'p09':count[9],'p10':count[10],\
'p11':count[11],'p12':count[12],'p13':count[13],'p14':count[14],'p15':count[15],\
'p16':count[16],'p17':count[17],'p18':count[18],'p19':count[19],'p20':count[20],\
'p21':count[21],'p22':count[22],'p23':count[23],'p24':count[24],'p25':count[25],\
'p26':count[26],'p27':count[27],'p28':count[28],'p29':count[29],'p30':count[30],\
'p31':count[31],'p32':count[32],'p33':count[33],'p34':count[34],'p35':count[35],\
'p36':count[36],'p37':count[37],'p38':count[38],'p39':count[39],'p40':count[40],\
'p41':count[41],'p42':count[42],'p43':count[43],'p44':count[44],'p45':count[45],\
'p46':count[46],'p47':count[47],'p48':count[48],'p49':count[49],'p50':count[50],\
'p51':count[51],'p52':count[52],'p53':count[53],'p54':count[54],'p55':count[55],\
'p56':count[56],'p57':count[57],'p58':count[58],'p59':count[59],'p60':count[60],\
'p61':count[61],'p62':count[62],'p63':count[63],'p64':count[64],'p01o':countori[1],'p02o':countori[2],\
'p03o':countori[3],'p04o':countori[4],\
'p05o':countori[5],'p06o':countori[6],'p07o':countori[7],'p08o':countori[8],'p09o':countori[9],'p10o':countori[10],\
'p11o':countori[11],'p12o':countori[12],'p13o':countori[13],'p14o':countori[14],'p15o':countori[15],\
'p16o':countori[16],'p17o':countori[17],'p18o':countori[18],'p19o':countori[19],'p20o':countori[20],\
'p21o':countori[21],'p22o':countori[22],'p23o':countori[23],'p24o':countori[24],'p25o':countori[25],\
'p26o':countori[26],'p27o':countori[27],'p28o':countori[28],'p29o':countori[29],'p30o':countori[30],\
'p31o':countori[31],'p32o':countori[32],'p33o':countori[33],'p34o':countori[34],\
'p35o':countori[35],'p36o':countori[36],'p37o':countori[37],'p38o':countori[38],'p39o':countori[39],'p40o':countori[40],\
'p41o':countori[41],'p42o':countori[42],'p43o':countori[43],'p44o':countori[44],'p45o':countori[45],\
'p46o':countori[46],'p47o':countori[47],'p48o':countori[48],'p49o':countori[49],'p50o':countori[50],\
'p51o':countori[51],'p52o':countori[52],'p53o':countori[53],'p54o':countori[54],'p55o':countori[55],\
'p56o':countori[56],'p57o':countori[57],'p58o':countori[58],'p59o':countori[59],'p60o':countori[60],\
'p61o':countori[61],'p62o':countori[62],'p63o':countori[63],'p64o':countori[64],'M':M,'UM':UM,'Mo':Mo,'UMo':UMo,\
'strand':strand}, index=[0])
return opt
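def _make_patterns(w):
    # The hard-coded pattern lists in outwindow() (e.g. ['00','10','01','11'] for w=2) follow
    # binary counting with the first position as the least significant bit. This sketch
    # reproduces that ordering programmatically; it is an observation, not part of the
    # original script.
    return [''.join(str((i >> b) & 1) for b in range(w)) for i in range(2 ** w)]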
def impute(window,w):
full_ind=np.where(np.isnan(window).sum(axis=1)==0)[0]
part_ind=np.where(np.isnan(window).sum(axis=1)==1)[0]
for i in range(len(part_ind)):
sam = []
# which column is nan
pos=np.where(np.isnan(window[part_ind[i],:]))[0]
if np.unique(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos]).shape[0]==1:
window[part_ind[i],pos]=window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos][0]
else:
#print("win_part i pos =",window[part_ind[i],pos])
for j in range(len(full_ind)):
if (window[part_ind[i],:]==window[full_ind[j],:]).sum()==w-1:
sam.append(j)
if len(sam)>0:
s1=random.sample(sam, 1)
s=window[full_ind[s1],pos]
else:
s=random.sample(window[np.where(np.invert(np.isnan(window[:,pos])))[0],pos].tolist(), k=1)[0]
window[part_ind[i],pos]=np.float64(s)
#print("win_part i =",window[part_ind[i],pos])
#print("s = ",np.float64(s))
return window
def CGgenome_scr(bamfile,chrom,w,fa,mC=4,silence=False,optional=False,folder='MeHdata'):
filename, file_extension = os.path.splitext(bamfile)
coverage = cov_context = 0
# load bamfile
samfile = pysam.AlignmentFile("%s/%s.bam" % (folder,filename), "rb")
# load reference genome
fastafile = pysam.FastaFile('%s/%s.fa' % (folder,fa))
# initialise data frame for genome screening (load C from bam file)
aggreR = aggreC = pd.DataFrame(columns=['Qname'])
# if user wants to output compositions of methylation patterns at every eligible window, initialise data frame
if w==3 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','M','UM','strand'])
if w==4 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','M','UM','strand'])
if w==5 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'M','UM','strand'])
if w==6 and not optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','M','UM','strand'])
if w==7 and not optional:
ResultPW = pd.DataFrame(columns=\
['chrom','pos','M','UM','p01','p02','p03','p04','p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16'\
,'p17','p18','p19','p20','p21','p22','p23','p24','p25','p26','p27','p28',\
'p29','p30','p31','p32','p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46'\
,'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60','p61','p62','p63','p64'\
,'p65','p66','p67','p68','p69','p70','p71','p72','p73','p74','p75','p76','p77','p78','p79','p80','p81','p82','p83','p84','p85','p86'\
,'p87','p88','p89','p90','p91','p92','p93','p94','p95','p96','p97','p98','p99','p100','p101','p102','p103','p104'\
,'p105','p106','p107','p108','p109','p110','p111','p112','p113','p114','p115','p116','p117','p118','p119','p120','p121','p122','p123','p124','p125','p126','p127','p128','strand'])
if w==3 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','M','UM','Mo','UMo','strand'])
if w==4 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','M','UM','Mo','UMo','strand'])
if w==5 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'M','UM','Mo','UMo','strand'])
if w==6 and optional:
ResultPW=pd.DataFrame(columns=['chrom','pos','p01','p02','p03','p04',\
'p05','p06','p07','p08','p09','p10','p11','p12','p13','p14','p15','p16','p17','p18',\
'p19','p20','p21','p22','p23','p24','p25','p26','p27','p28','p29','p30','p31','p32',\
'p33','p34','p35','p36','p37','p38','p39','p40','p41','p42','p43','p44','p45','p46',\
'p47','p48','p49','p50','p51','p52','p53','p54','p55','p56','p57','p58','p59','p60',\
'p61','p62','p63','p64','p01o','p02o','p03o','p04o',\
'p05o','p06o','p07o','p08o','p09o','p10o','p11o','p12o','p13o','p14o','p15o','p16o','p17o','p18o',\
'p19o','p20o','p21o','p22o','p23o','p24o','p25o','p26o','p27o','p28o','p29o','p30o','p31o','p32o',\
'p33o','p34o','p35o','p36o','p37o','p38o','p39o','p40o','p41o','p42o','p43o','p44o','p45o','p46o',\
'p47o','p48o','p49o','p50o','p51o','p52o','p53o','p54o','p55o','p56o','p57o','p58o','p59o','p60o',\
'p61o','p62o','p63o','p64o','M','UM','Mo','UMo','strand'])
neverr = never = True
chrom_list = []
# all samples' bam files
for i in samfile.get_index_statistics():
chrom_list.append(i.contig)
if chrom in chrom_list:
# screen bamfile by column
for pileupcolumn in samfile.pileup(chrom):
coverage += 1
if not silence:
if (pileupcolumn.pos % 2000000 == 1):
print("CG %s s %s w %s %s pos %s Result %s" % (datetime.datetime.now(),filename,w,chrom,pileupcolumn.pos,ResultPW.shape[0]))
# Forward strand, check if 'CG' in reference genome
if (fastafile.fetch(chrom,pileupcolumn.pos,pileupcolumn.pos+2)=='CG'):
cov_context += 1
temp = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
# append reads in the column
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and not pileupread.alignment.is_reverse: # C
d = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
df2 = pd.DataFrame(data=d)
temp=temp.append(df2, ignore_index=True)
# merge with other columns
if (not temp.empty):
aggreC = pd.merge(aggreC,temp,how='outer',on=['Qname'])
aggreC = aggreC.drop_duplicates()
# Reverse strand, check if 'CG' in reference genome
if pileupcolumn.pos>1:
if (fastafile.fetch(chrom,pileupcolumn.pos-1,pileupcolumn.pos+1)=='CG'):
cov_context += 1
tempr = pd.DataFrame(columns=['Qname',pileupcolumn.pos+1])
pileupcolumn.set_min_base_quality(0)
for pileupread in pileupcolumn.pileups:
if not pileupread.is_del and not pileupread.is_refskip and pileupread.alignment.is_reverse: # C
dr = {'Qname': [pileupread.alignment.query_name], pileupcolumn.pos+1: [pileupread.alignment.query_sequence[pileupread.query_position]]}
dfr2 = pd.DataFrame(data=dr)
tempr=tempr.append(dfr2, ignore_index=True)
if (not tempr.empty):
aggreR = pd.merge(aggreR,tempr,how='outer',on=['Qname'])
import nose
import unittest
from numpy import nan
from pandas.core.daterange import DateRange
from pandas.core.index import Index, MultiIndex
from pandas.core.common import rands, groupby
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal)
from pandas.core.panel import WidePanel
from collections import defaultdict
import pandas.core.datetools as dt
import numpy as np
import pandas.util.testing as tm
# unittest.TestCase
def commonSetUp(self):
self.dateRange = DateRange('1/1/2005', periods=250, offset=dt.bday)
self.stringIndex = Index([rands(8) for _ in range(250)])
import pandas as pd
d_full = pd.read_csv('../data/data.csv', sep='\t')
import evalml
import pandas as pd
import pytest
from bentoml.evalml import EvalMLModel
from tests._internal.helpers import assert_have_file_extension
test_df = pd.DataFrame([[42, "b"]])
@pytest.fixture(scope="session")
def binary_pipeline() -> "evalml.pipelines.BinaryClassificationPipeline":
X = pd.DataFrame([[0, "a"], [0, "a"], [0, "a"], [42, "b"], [42, "b"], [42, "b"]])
# --------------------------------
# Name: featurelinelib.py
# Purpose: This file serves as a function library for the Feature line Toolboxes. Import as fll.
# Current Owner: <NAME>
# Last Modified: 8/31/2019
# Copyright: <NAME>
# ArcGIS Version: ArcGIS Pro/10.4
# Python Version: 3.5/2.7
# --------------------------------
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import arcpy
import os
import itertools
import math
try:
import pandas as pd
except:
arcpy.AddWarning("Some tools require Pandas to be installed in the ArcGIS Python installation."
" This might require installing prerequisite libraries and software.")
# Function Definitions
def func_report(function=None, reportBool=False):
"""This decorator function is designed to be used as a wrapper with other functions to enable basic try/except
reporting (if the function fails, it reports the name of the function that failed and its arguments). If the report
boolean is true, the function reports the inputs and outputs of the function. -David Wasserman"""
def func_report_decorator(function):
def func_wrapper(*args, **kwargs):
try:
func_result = function(*args, **kwargs)
if reportBool:
print("Function:{0}".format(str(function.__name__)))
print(" Input(s):{0}".format(str(args)))
print(" Output(s):{0}".format(str(func_result)))
return func_result
except Exception as e:
print(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__), str(args)))
print(e.args[0])
return func_wrapper
if not function: # User passed in a bool argument
def waiting_for_function(function):
return func_report_decorator(function)
return waiting_for_function
else:
return func_report_decorator(function)
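# A tiny illustration of the decorator above; the function name and values are made up.
def _demo_func_report():
    @func_report(reportBool=True)
    def add_numbers(a, b):
        return a + b
    return add_numbers(2, 3)  # prints the function name, inputs and output, then returns 5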
def arc_tool_report(function=None, arcToolMessageBool=False, arcProgressorBool=False):
"""This decorator function is designed to be used as a wrapper with other GIS functions to enable basic try/except
reporting (if the function fails, it reports the name of the function that failed and its arguments). If the report
boolean is true, the function reports the inputs and outputs of the function. -<NAME>"""
def arc_tool_report_decorator(function):
def func_wrapper(*args, **kwargs):
try:
func_result = function(*args, **kwargs)
if arcToolMessageBool:
arcpy.AddMessage("Function:{0}".format(str(function.__name__)))
arcpy.AddMessage(" Input(s):{0}".format(str(args)))
arcpy.AddMessage(" Output(s):{0}".format(str(func_result)))
if arcProgressorBool:
arcpy.SetProgressorLabel("Function:{0}".format(str(function.__name__)))
arcpy.SetProgressorLabel(" Input(s):{0}".format(str(args)))
arcpy.SetProgressorLabel(" Output(s):{0}".format(str(func_result)))
return func_result
except Exception as e:
arcpy.AddMessage(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__),
str(args)))
print(
"{0} - function failed -|- Function arguments were:{1}.".format(str(function.__name__), str(args)))
print(e.args[0])
return func_wrapper
if not function: # User passed in a bool argument
def waiting_for_function(function):
return arc_tool_report_decorator(function)
return waiting_for_function
else:
return arc_tool_report_decorator(function)
@arc_tool_report
def arc_print(string, progressor_Bool=False):
""" This function is used to simplify arcpy reporting for tool creation; if the progressor bool is true, it will
also set the progressor label."""
casted_string = str(string)
if progressor_Bool:
arcpy.SetProgressorLabel(casted_string)
arcpy.AddMessage(casted_string)
print(casted_string)
else:
arcpy.AddMessage(casted_string)
print(casted_string)
@arc_tool_report
def field_exist(featureclass, fieldname):
"""ArcFunction
Check if a field in a feature class exists; return True if it does, False if not. - <NAME>"""
fieldList = arcpy.ListFields(featureclass, fieldname)
fieldCount = len(fieldList)
if (fieldCount >= 1) and fieldname.strip(): # If there is one or more of this field return true
return True
else:
return False
@arc_tool_report
def add_new_field(in_table, field_name, field_type, field_precision="#", field_scale="#", field_length="#",
field_alias="#", field_is_nullable="#", field_is_required="#", field_domain="#"):
"""ArcFunction
Add a new field if it does not already exist (checking first is faster than calling AddField alone). - <NAME>"""
if field_exist(in_table, field_name):
print(field_name + " Exists")
arcpy.AddMessage(field_name + " Exists")
else:
print("Adding " + field_name)
arcpy.AddMessage("Adding " + field_name)
arcpy.AddField_management(in_table, field_name, field_type, field_precision, field_scale,
field_length,
field_alias,
field_is_nullable, field_is_required, field_domain)
@arc_tool_report
def validate_df_names(dataframe, output_feature_class_workspace):
"""Returns pandas dataframe with all col names renamed to be valid arcgis table names."""
new_name_list = []
old_names = dataframe.columns
for name in old_names:
new_name = arcpy.ValidateFieldName(name, output_feature_class_workspace)
new_name_list.append(new_name)
rename_dict = {i: j for i, j in zip(old_names, new_name_list)}
dataframe = dataframe.rename(index=str, columns=rename_dict)
return dataframe
@arc_tool_report
def arcgis_table_to_dataframe(in_fc, input_fields, query="", skip_nulls=False, null_values=None):
"""Function will convert an arcgis table into a pandas dataframe with an object ID index, and the selected
input fields. Uses TableToNumPyArray to get initial data."""
OIDFieldName = arcpy.Describe(in_fc).OIDFieldName
if input_fields:
final_fields = [OIDFieldName] + input_fields
else:
final_fields = [field.name for field in arcpy.ListFields(in_fc)]
np_array = arcpy.da.TableToNumPyArray(in_fc, final_fields, query, skip_nulls, null_values)
object_id_index = np_array[OIDFieldName]
fc_dataframe = pd.DataFrame(np_array, index=object_id_index, columns=input_fields)
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import PathPatch
def plot_fclayer_models_test_set_results_lut():
general_without_fu_model_file_name = "test_set_results_FCLayer_LUT_general_without_fully_unfolded_configs"
general_with_fu_model_file_name = "test_set_results_FCLayer_LUT_general_with_fully_unfolded_configs"
general_augmentation_model_file_name = "test_set_results_FCLayer_LUT_general_augmentation"
specialized_model_file_name = "test_set_results_FCLayer_LUT_specialized"
specialized_augmentation_model_file_name = "test_set_results_FCLayer_LUT_specialized_augmentation"
general_without_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_without_fu_model_file_name
general_with_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_with_fu_model_file_name
general_augmentation_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_augmentation_model_file_name
specialized_model_folder_path = "../test_set_results/FCLayer/%s.csv" % specialized_model_file_name
specialized_augmentation_model_folder_path = "../test_set_results/FCLayer/%s.csv" % specialized_augmentation_model_file_name
df_general_without_fu_model = pd.read_csv(general_without_fu_model_folder_path)
df_general_with_fu_model = pd.read_csv(general_with_fu_model_folder_path)
df_general_augmentation_model = pd.read_csv(general_augmentation_model_folder_path)
df_specialized_model = pd.read_csv(specialized_model_folder_path)
df_specialized_augmentation_model = pd.read_csv(specialized_augmentation_model_folder_path)
#import pdb; pdb.set_trace()
df_plot = pd.DataFrame()
df_plot['HLS GM (FD)'] = df_general_with_fu_model['hls_rel_error']
df_plot['FINN GM (FD)'] = df_general_with_fu_model['finn_rel_error']
df_plot['SVR GM (FD)'] = df_general_with_fu_model['svr_rel_error']
df_plot['HLS GM (PD)'] = df_general_without_fu_model['hls_rel_error']
df_plot['FINN GM (PD)'] = df_general_without_fu_model['finn_rel_error']
df_plot['SVR GM (PD)'] = df_general_without_fu_model['svr_rel_error']
#recheck if they are the same - hls and finn
#df_plot['HLS (general model + aug)'] = df_general_augmentation_model['hls_rel_error']
#df_plot['FINN (general model + aug)'] = df_general_augmentation_model['finn_rel_error']
df_plot['SVR GM (PD + AUG)'] = df_general_augmentation_model['svr_rel_error']
df_plot['HLS SM'] = df_specialized_model['hls_rel_error']
df_plot['FINN SM'] = df_specialized_model['finn_rel_error']
df_plot['SVR SM'] = df_specialized_model['svr_rel_error']
#recheck if they are the same - hls and finn
#df_plot['HLS (specialized model + aug)'] = df_specialized_augmentation_model['hls_rel_error']
#df_plot['FINN (specialized model + aug)'] = df_specialized_augmentation_model['finn_rel_error']
df_plot['SVR SM (PD + AUG)'] = df_specialized_augmentation_model['svr_rel_error']
fig = plt.figure(figsize=(20, 11))
boxplot = df_plot.boxplot(showmeans=True, showfliers=True, return_type='dict', color=dict(boxes='black', whiskers='black', medians='r', caps='black'), patch_artist=True)
colors = ['lightskyblue', 'lightgreen', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightyellow']
#import pdb; pdb.set_trace()
for patch, color in zip(boxplot['means'], colors):
patch.set_markeredgecolor('red')
patch.set_markerfacecolor('red')
for patch, color in zip(boxplot['boxes'], colors):
patch.set_facecolor(color)
plt.xticks(rotation = 45)
plt.title('FCLayer - LUT estimation model - Test Set Results')
plt.ylabel('Relative error [%]')
fig.savefig('../test_set_results/FCLayer/test_set_results_plot_luts_with_fu_with_outliers.png', bbox_inches='tight')
def add_newest_finn_estimation_to_the_csv_file(filename):
#finn estimate computed with the clasifier
df_updated_finn_estimate = pd.read_csv("../test_set_results/updated_fclayer_database_finn_estimate.csv")
filepath = "../test_set_results/FCLayer/%s.csv" % filename
#the other csv file that needs to be updated
df_initial = pd.read_csv(filepath)
df_initial['bram_new_finn_estimate'] = -1
#remove rows of df_updated_finn_estimate not found in df_initial
#copy to df_initial finn_new_estimate
if (filename == 'test_set_results_FCLayer_Total_BRAM_18K_specialized_min_fu'):
parameters = ['mh', 'mw', 'pe', 'simd', 'wdt', 'idt']
else:
parameters = ['mh', 'mw', 'pe', 'simd', 'wdt', 'idt', 'act', 'mem_mode']
for index1, row1 in df_initial[parameters].iterrows():
if (filename == 'test_set_results_FCLayer_Total_BRAM_18K_specialized_min_fu'):
df_temp = df_updated_finn_estimate.loc[(df_updated_finn_estimate.mh == row1['mh']) & (df_updated_finn_estimate.mw == row1['mw']) & (df_updated_finn_estimate.pe == row1['pe']) & (df_updated_finn_estimate.simd == row1['simd']) & (df_updated_finn_estimate.idt == row1['idt']) & (df_updated_finn_estimate.wdt == row1['wdt'])]
else:
df_temp = df_updated_finn_estimate.loc[(df_updated_finn_estimate.mh == row1['mh']) & (df_updated_finn_estimate.mw == row1['mw']) & (df_updated_finn_estimate.pe == row1['pe']) & (df_updated_finn_estimate.simd == row1['simd']) & (df_updated_finn_estimate.idt == row1['idt']) & (df_updated_finn_estimate.wdt == row1['wdt']) & (df_updated_finn_estimate.act == row1['act']) & (df_updated_finn_estimate.mem_mode == row1['mem_mode'])]
if not df_temp.empty:
df_initial.at[index1, 'bram_new_finn_estimate'] = int(df_temp.iloc[0]['BRAM_new'])
df_initial["Total_BRAM_18K_synth_denom"] = df_initial["Total_BRAM_18K synth"].apply(lambda x: 1 if x == 0 else x)
df_initial["finn_rel_error_new"] = df_initial.apply(lambda x: (abs(x['bram_new_finn_estimate'] - x["Total_BRAM_18K synth"])/x["Total_BRAM_18K_synth_denom"])*100, axis=1)
filepath_to_save = "../test_set_results/FCLayer/%s_updated.csv" % filename
df_initial.to_csv(filepath_to_save, index = False, header=True)
#import pdb; pdb.set_trace()
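# The row-by-row lookup above could also be expressed as a single left merge on the parameter
# columns; a sketch, assuming each parameter combination occurs at most once in the
# updated-estimate table:
#   merged = df_initial.merge(df_updated_finn_estimate[parameters + ['BRAM_new']],
#                             on=parameters, how='left')
#   df_initial['bram_new_finn_estimate'] = merged['BRAM_new'].fillna(-1).astype(int)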
def plot_fclayer_models_test_set_results_bram():
general_with_fu_model_file_name = "test_set_results_FCLayer_Total_BRAM_18K_general_plus_fu_updated"
general_without_fu_model_file_name = "test_set_results_FCLayer_Total_BRAM_18K_general_min_fu_updated"
specialized_without_fu_model_file_name = "test_set_results_FCLayer_Total_BRAM_18K_specialized_min_fu_updated"
general_with_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_with_fu_model_file_name
general_without_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % general_without_fu_model_file_name
specialized_without_fu_model_folder_path = "../test_set_results/FCLayer/%s.csv" % specialized_without_fu_model_file_name
df_general_with_fu_model = pd.read_csv(general_with_fu_model_folder_path)
df_general_without_fu_model = pd.read_csv(general_without_fu_model_folder_path)
df_specialized_without_fu_model = pd.read_csv(specialized_without_fu_model_folder_path)
df_plot = pd.DataFrame()
df_plot['HLS GM (FD)'] = df_general_with_fu_model['hls_rel_error']
#df_plot['FINN GM (FD)'] = df_general_with_fu_model['finn_rel_error']
#df_plot['FINN NEW GM (FD)'] = df_general_with_fu_model['finn_rel_error_new']
df_plot['SVR GM (FD)'] = df_general_with_fu_model['svr_rel_error']
df_plot['HLS GM (PD)'] = df_general_without_fu_model['hls_rel_error']
#df_plot['FINN GM (PD)'] = df_general_without_fu_model['finn_rel_error']
#df_plot['FINN NEW GM (PD)'] = df_general_without_fu_model['finn_rel_error_new']
df_plot['SVR GM (PD)'] = df_general_without_fu_model['svr_rel_error']
#df_plot['HLS SM (PD)'] = df_specialized_without_fu_model['hls_rel_error']
#df_plot['FINN SM (PD)'] = df_specialized_without_fu_model['finn_rel_error']
#df_plot['FINN NEW SM (PD)'] = df_specialized_without_fu_model['finn_rel_error_new']
#df_plot['SVR SM (PD)'] = df_specialized_without_fu_model['svr_rel_error']
fig = plt.figure(figsize=(20, 11))
boxplot = df_plot.boxplot(showmeans=True, showfliers=False, return_type='dict', color=dict(boxes='black', whiskers='black', medians='r', caps='black'), patch_artist=True)
colors = ['lightskyblue', 'lightyellow', 'lightskyblue', 'lightyellow']
#colors = ['lightskyblue', 'lightyellow', 'lightskyblue', 'lightyellow', 'lightskyblue', 'lightyellow']
#import pdb; pdb.set_trace()
for patch, color in zip(boxplot['means'], colors):
patch.set_markeredgecolor('red')
patch.set_markerfacecolor('red')
for patch, color in zip(boxplot['boxes'], colors):
patch.set_facecolor(color)
plt.xticks(rotation = 45)
plt.title('FCLayer - BRAM estimation model - Test Set Results')
plt.ylabel('Relative error [%] ')
fig.savefig('../test_set_results/FCLayer/test_set_results_plot_bram_without_pd.png', bbox_inches='tight')
def plot_thresholding_models_test_set_results_lut():
general_model_file_name = "test_set_results_Thresholding_LUT_general"
general_augmentation_model_file_name = "test_set_results_Thresholding_LUT_general_augmentation"
general_min_fu_model_file_name = "test_set_results_Thresholding_LUT_general_min_fu"
general_min_fu_augmentation_model_file_name = "test_set_results_Thresholding_LUT_general_min_fu_augmentation"
specialized_model_file_name = "test_set_results_Thresholding_LUT_specialized"
specialized_min_fu_model_file_name = "test_set_results_Thresholding_LUT_specialized_min_fu"
general_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_model_file_name
general_augmentation_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_augmentation_model_file_name
general_min_fu_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_min_fu_model_file_name
general_min_fu_augmentation_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_min_fu_augmentation_model_file_name
specialized_model_folder_path = "../test_set_results/Thresholding/%s.csv" % specialized_model_file_name
specialized_min_fu_model_folder_path = "../test_set_results/Thresholding/%s.csv" % specialized_min_fu_model_file_name
df_general_model = pd.read_csv(general_model_folder_path)
df_general_augmentation_model = pd.read_csv(general_augmentation_model_folder_path)
df_general_min_fu_model = pd.read_csv(general_min_fu_model_folder_path)
df_general_min_fu_augmentation_model = pd.read_csv(general_min_fu_augmentation_model_folder_path)
df_specialized_model = pd.read_csv(specialized_model_folder_path)
df_specialized_min_fu_model = pd.read_csv(specialized_min_fu_model_folder_path)
df_plot = pd.DataFrame()
df_plot['HLS GM (FD)'] = df_general_model['hls_rel_error']
df_plot['FINN GM (FD)'] = df_general_model['finn_rel_error']
df_plot['SVR GM (FD)'] = df_general_model['svr_rel_error']
#df_plot['HLS GM (FD + AUG)'] = df_general_augmentation_model['hls_rel_error']
#df_plot['FINN GM (FD + AUG)'] = df_general_augmentation_model['finn_rel_error']
#df_plot['SVR GM (FD + AUG)'] = df_general_augmentation_model['svr_rel_error']
df_plot['HLS GM (PD)'] = df_general_min_fu_model['hls_rel_error']
df_plot['FINN GM (PD)'] = df_general_min_fu_model['finn_rel_error']
df_plot['SVR GM (PD)'] = df_general_min_fu_model['svr_rel_error']
#df_plot['HLS GM (PD)'] = df_general_min_fu_model['hls_rel_error']
#df_plot['FINN GM (PD)'] = df_general_min_fu_model['finn_rel_error']
#df_plot['SVR GM (PD + AUG)'] = df_general_min_fu_augmentation_model['svr_rel_error']
#df_plot['HLS SM (FD)'] = df_specialized_model['hls_rel_error']
#df_plot['FINN SM (FD)'] = df_specialized_model['finn_rel_error']
#df_plot['SVR SM (FD)'] = df_specialized_model['svr_rel_error']
df_plot['HLS SM (PD)'] = df_specialized_min_fu_model['hls_rel_error']
df_plot['FINN SM (PD)'] = df_specialized_min_fu_model['finn_rel_error']
df_plot['SVR SM (PD)'] = df_specialized_min_fu_model['svr_rel_error']
fig = plt.figure(figsize=(20, 11))
boxplot = df_plot.boxplot(showmeans=True, showfliers=False, return_type='dict', color=dict(boxes='black', whiskers='black', medians='r', caps='black'), patch_artist=True)
#colors = ['lightskyblue', 'lightgreen', 'lightyellow', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow']
colors = ['lightskyblue', 'lightgreen', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow']
for patch, color in zip(boxplot['means'], colors):
patch.set_markeredgecolor('red')
patch.set_markerfacecolor('red')
for patch, color in zip(boxplot['boxes'], colors):
patch.set_facecolor(color)
plt.xticks(rotation = 45)
plt.title('Thresholding Layer - LUT estimation model - Test Set Results')
plt.ylabel('Relative error [%] ')
fig.savefig('../test_set_results/Thresholding/test_set_results_LUT_plot_without_outliers_plus_fu.png', bbox_inches='tight')
def plot_thresholding_models_test_set_results_bram():
general_model_file_name = "test_set_results_Thresholding_Total_BRAM_18K_general"
general_min_fu_model_file_name = "test_set_results_Thresholding_Total_BRAM_18K_general_min_fu"
general_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_model_file_name
general_min_fu_model_folder_path = "../test_set_results/Thresholding/%s.csv" % general_min_fu_model_file_name
df_general_model = pd.read_csv(general_model_folder_path)
df_general_min_fu_model = pd.read_csv(general_min_fu_model_folder_path)
df_plot = pd.DataFrame()
df_plot['HLS GM (FD)'] = df_general_model['hls_rel_error']
df_plot['FINN GM (FD)'] = df_general_model['finn_rel_error']
df_plot['SVR GM (FD)'] = df_general_model['svr_rel_error']
df_plot['HLS GM (PD)'] = df_general_min_fu_model['hls_rel_error']
df_plot['FINN GM (PD)'] = df_general_min_fu_model['finn_rel_error']
df_plot['SVR GM (PD)'] = df_general_min_fu_model['svr_rel_error']
fig = plt.figure(figsize=(20, 11))
boxplot = df_plot.boxplot(showmeans=True, showfliers=True, return_type='dict', color=dict(boxes='black', whiskers='black', medians='r', caps='black'), patch_artist=True)
colors = ['lightskyblue', 'lightgreen', 'lightyellow', 'lightskyblue', 'lightgreen', 'lightyellow']
for patch, color in zip(boxplot['means'], colors):
patch.set_markeredgecolor('red')
patch.set_markerfacecolor('red')
for patch, color in zip(boxplot['boxes'], colors):
patch.set_facecolor(color)
plt.xticks(rotation = 45)
plt.title('Thresholding Layer - BRAM estimation model - Test Set Results')
plt.ylabel('Relative error [%] ')
fig.savefig('../test_set_results/Thresholding/test_set_results_BRAM_plot_with_outliers.png', bbox_inches='tight')
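def style_boxplot(boxplot, colors):
    # The mean-marker and box-colour styling above is repeated in every plotting function;
    # a small helper like this (a suggested refactor, not in the original) could factor it out.
    for patch in boxplot['means']:
        patch.set_markeredgecolor('red')
        patch.set_markerfacecolor('red')
    for patch, color in zip(boxplot['boxes'], colors):
        patch.set_facecolor(color)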
def plot_SWU_models_test_set_results_lut():
general_model_file_name = "test_set_results_Sliding_Window_Unit_LUT_general"
specialized_dw_0_model_file_name = "test_set_results_Sliding_Window_Unit_LUT_specialized_dw_0"
specialized_dw_1_model_file_name = "test_set_results_Sliding_Window_Unit_LUT_specialized_dw_1"
general_model_folder_path = "../test_set_results/Sliding_Window_Unit/%s.csv" % general_model_file_name
specialized_dw_0_model_folder_path = "../test_set_results/Sliding_Window_Unit/%s.csv" % specialized_dw_0_model_file_name
specialized_dw_1_model_folder_path = "../test_set_results/Sliding_Window_Unit/%s.csv" % specialized_dw_1_model_file_name
df_general_model = pd.read_csv(general_model_folder_path)
# -*- coding: utf-8 -*-
"""
SnowPy - A Python library to upload and download data from database systems
===============================================================================
- SnowPy data tools (datatools)
'SnowPy' was initially released as the data tools (etl) for the mltoolkit project (https://mltoolkit.github.io/MLToolKit).
Author
------
- <NAME>
License
-------
- Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
Created Date
----------
- Sat Sep 28 2019
"""
from timeit import default_timer as timer
import gc
import socket
import getpass
import traceback
import sys
import os
import shutil
import csv
import urllib
import subprocess
import uuid
import pandas as pd
import numpy as np
try:
import snowflake.connector
from snowflake.sqlalchemy import URL
except:
print("Error loading 'snowflake' library. Snowflake suppport disabled")
try:
import sqlalchemy
except:
print("Error loading 'sqlalchemy' library. database suppport disabled")
def print_data_resource_execute_format(): # Generate for a given use case (e.g. for SQL Server) in a future version
data_connector = {
'type' : 'mssql', #{'snowflake', 'mysql', 'csv', 'json', 'pickle', 'hdf'}
'connect_parameters' : {
'file_path':None,
'account' : None,
'server': None,
'database' : None,
'schema' : None,
'auth' : None # Only for sources with access permission # auth = {'type':'user', 'user':'user', 'password':'password', 'role': 'reader'}
},
'computing_parameters' : {
'warehouse' : None, # For data warehouse services like Snowflake
'driver' : '' # For SQL server connections
}
}
data_object = {
'identifiers' :{
'file_name' : None,
'table' : None, # Sor SQL
'key' : None, # for HDF. Can also used to adress sheets in Excel
'dataset_label' : None
},
'structure_parameters' : {
'columns' : None,
'index' : False,
'header': 'infer',
'partition_columns': None, # For parquet Ref: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html
'orient' : None, # For JSON {'records', 'index', 'split} # Ref: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_json.html
},
'format_parameters': {
'dtypes' : None,
'date_columns' : None,
'separator' : ',',
'line_break' : '\r\n',
'quoting' : 'ALL',
'encoding' : None,
'mode' : 'a', # for HDF {‘r’, ‘a’, 'w'}
'compression' : 'infer', # 'infer' for Pickle # 'zlib'for HDF # Pickle, https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_pickle.html#pandas.DataFrame.to_pickle
'file_format' : 'fixed', # for HDF {'fixed', 'table'}. Currently support only 'fixed'
}
}
execute_params = {
'chunksize' : None,
'on_error' : 'ignore',
'return_time' : False, # time to execute
'return_rowcount' : False, # rows affected
'backend' : 'pandas' , # to use pandas read/write methods
'temp_file_path': '', # intermediate file for processing large dataset
'read_params' : {
'start' : None,
'stop' : None
},
'write_params': {
'if_exists' : 'fail',
'insertion_method' : None, # If pandas {'multi'}
'checksum' : False, # for HDF
'complevel' : 0, # for HDF
'protocol' : -1 # Pickle https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_pickle.html#pandas.DataFrame.to_pickle
}
}
print('data_connector =', data_connector)
print('data_object =', data_object)
print('execute_params =', execute_params)
def print_execute_time(execute_time, task_name=''): # convert to use as a decorator in a future version
try:
execute_time = float(execute_time or 0)
except:
execute_time = 0.0
print('{} time is {:.3f} s'.format(task_name, execute_time))
def print_records_count(records_count, task_name=''): # convert to use as a decorator in a future version
try:
records_count = int(records_count or 0)
except:
records_count = 0
print('{} {:,d} records'.format(task_name, records_count))
###############################################################################
def set_field_value(value, default=None):
"""
Parameters
----------
value : str, int, float, object
default :str, int, float, object, defualt None
Returns
-------
value :str, int, float, object
"""
try:
return value
except:
return default
def get_field_value(fields_dict, key1=None, key2=None, default=None):
"""
Parameters
----------
fields_dict : dict
key1 :str
key2 : str
default :str, int, float, object, defualt None
Returns
-------
value :str, int, float, object
"""
try:
if key1!=None and key2!=None:
value = set_field_value(fields_dict[key1][key2], default=default)
elif key1!=None and key2==None:
value = set_field_value(fields_dict[key1], default=default)
else:
raise Exception('Key error !')
return value
except:
return default
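def _demo_get_field_value():
    # Small illustration of the accessor above on a nested connector dict (values are made up).
    cfg = {'connect_parameters': {'server': 'db01'}}
    found = get_field_value(cfg, 'connect_parameters', 'server')                        # -> 'db01'
    fallback = get_field_value(cfg, 'connect_parameters', 'database', default='demo')   # -> 'demo'
    return found, fallback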
def read_data(query=None, data_connector=None, data_object =None, execute_params=None):
"""
Parameters
----------
query : str
data_connector : dict
run mltk.print_data_resource_execute_format() to get the format
data_object : dict
run mltk.print_data_resource_execute_format() to get the format
execute_params : dict
run mltk.print_data_resource_execute_format() to get the format
Returns
-------
DataFrame : pandas.DataFrame
"""
# Data Source Paramaters ###
source_type = get_field_value(data_connector, 'type', default=None)
server = get_field_value(data_connector, 'connect_parameters','server', default=None)
database = get_field_value(data_connector, 'connect_parameters','database', default=None)
auth = get_field_value(data_connector, 'connect_parameters','auth', default=None)
schema = get_field_value(data_connector, 'connect_parameters','schema', default=None)
warehouse = get_field_value(data_connector, 'computing_parameters','warehouse', default=None)
driver = get_field_value(data_connector, 'computing_parameters','driver', default='SQL Server')
#
file_path = get_field_value(data_connector, 'connect_parameters','file_path', default='tempfile.tmp')
# Data Object Parameters ###
file_name = get_field_value(data_object, 'identifiers','file_name', default='tempfile.tmp')
table = get_field_value(data_object, 'identifiers','table', default=None)
key = get_field_value(data_object, 'identifiers','key', default='frame1')
dataset_label = get_field_value(data_object, 'identifiers','dataset_label', default=None)
#
columns = get_field_value(data_object, 'structure_parameters','columns', default=None)
index = get_field_value(data_object, 'structure_parameters','index', default=False)
header = get_field_value(data_object, 'structure_parameters','header', default='infer')
partition_columns = get_field_value(data_object, 'structure_parameters','partition_columns', default=None)
orient = get_field_value(data_object, 'structure_parameters','orient', default= 'records')
#
dtypes = get_field_value(data_object,'format_parameters','dtypes', default=None)
date_columns = get_field_value(data_object,'format_parameters','date_columns', default=None)
separator = get_field_value(data_object,'format_parameters','separator', default=',')
line_break = get_field_value(data_object, 'format_parameters','line_break', default='\r\n')
quoting = get_field_value(data_object,'format_parameters','quoting', default='ALL')
encoding = get_field_value(data_object,'format_parameters','encoding', default='utf-8')
mode = get_field_value(data_object,'format_parameters','mode', default='a')
compression = get_field_value(data_object, 'format_parameters','compression', default='infer')
file_format = get_field_value(data_object, 'format_parameters','file_format', default='fixed')
# Execute Parameters ###
chunksize = get_field_value(execute_params, 'chunksize', default=None)
on_error = get_field_value(execute_params, 'on_error', default='ignore')
return_time = get_field_value(execute_params, 'return_time', default=False)
return_rowcount = get_field_value(execute_params, 'return_rowcount', default=False)
backend = get_field_value(execute_params, 'backend', default='pandas')
temp_file_path = get_field_value(execute_params, 'temp_file_path', default='')
params = get_field_value(execute_params, 'params', default=None)
# Read Parameters
start = get_field_value(execute_params, 'read_params', 'start', default=None)
stop = get_field_value(execute_params, 'read_params', 'stop', default=None)
# Write Parameters
if_exists = get_field_value(execute_params, 'write_params', 'if_exists', default='fail')
insertion_method = get_field_value(execute_params, 'write_params','insertion_method', default='multi')
complevel = get_field_value(execute_params, 'write_params', 'complevel', default=0)
checksum = get_field_value(execute_params, 'write_params', 'checksum', default=False)
protocol = get_field_value(execute_params, 'write_params', 'protocol', default=0)
partition_columns = get_field_value(execute_params, 'write_params', 'partition_columns', default=None)
if source_type == 'mssql':
DataFrame = read_data_mssql(
query=query,
server=server,
database=database,
auth=auth,
driver=driver,
on_error=on_error,
return_time=return_time,
params=params
)
elif source_type == 'csv':
print(source_type, file_path)
DataFrame = read_data_csv(
file=file_path,
separator=separator,
quoting=quoting ,
compression=compression,
encoding=encoding,
on_error=on_error,
return_time=return_time
)
elif source_type == 'pickle':
DataFrame = read_data_pickle(
file=file_path,
compression=compression
)
    elif source_type == 'hdf':
        # NOTE: read_data_hdf() takes no 'header' argument, so it is not forwarded here
        DataFrame = read_data_hdf(
            file=file_path,
            key=key,
            mode=mode,
            start=start,
            stop=stop,
            columns=columns,
            on_error=on_error
        )
elif source_type == 'snowflake':
DataFrame = read_data_snowflake(
query=query,
server=server,
database=database,
auth=auth,
schema=schema,
warehouse=warehouse,
date_columns=date_columns,
chunksize=chunksize,
backend=backend,
on_error=on_error,
return_time=return_time
)
    elif source_type == 'parquet':
        # 'engine' is not among the fields extracted above; assume an optional
        # format_parameters field and fall back to pandas' default of 'auto'
        engine = get_field_value(data_object, 'format_parameters', 'engine', default='auto')
        DataFrame = read_data_parquet(
            file=file_path,
            engine=engine,
            columns=columns,
            on_error=on_error
        )
else:
print('source_type {} not supported'.format(source_type))
DataFrame = pd.DataFrame()
return DataFrame
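# Illustrative usage sketch for read_data(): reading a CSV resource. The nested keys
# mirror the fields extracted above; the file path and values are placeholders, and
# read_data is assumed to accept the same keyword arguments as write_data below.
#
#   data_connector = {
#       'type': 'csv',
#       'connect_parameters': {'file_path': 'data/input.csv'},
#   }
#   data_object = {
#       'format_parameters': {'separator': ',', 'encoding': 'utf-8'},
#   }
#   df = read_data(data_connector=data_connector, data_object=data_object,
#                  execute_params={'on_error': 'strict', 'return_time': True})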
def write_data(DataFrame=None, data_connector=None, data_object =None, execute_params=None):
"""
Parameters
----------
DataFrame : pandas.DataFrame
    data_connector : dict
        run mltk.print_data_resource_execute_format() to get the format
    data_object : dict
        run mltk.print_data_resource_execute_format() to get the format
    execute_params : dict
        run mltk.print_data_resource_execute_format() to get the format
Returns
-------
None
"""
    # Data Source Parameters ###
    source_type = get_field_value(data_connector, 'type', default=None)
    server = get_field_value(data_connector, 'connect_parameters','server', default=None)
    # field names are assumed to match the variable names in the resource dict
    database = get_field_value(data_connector, 'connect_parameters','database', default=None)
    auth = get_field_value(data_connector, 'connect_parameters','auth', default=None)
    schema = get_field_value(data_connector, 'connect_parameters','schema', default=None)
    warehouse = get_field_value(data_connector, 'computing_parameters','warehouse', default=None)
driver = get_field_value(data_connector, 'computing_parameters','driver', default='SQL Server')
#
file_path = get_field_value(data_connector, 'connect_parameters','file_path', default='tempfile.tmp')
# Data Object Parameters ###
file_name = get_field_value(data_object, 'identifiers','file_name', default='tempfile.tmp')
table = get_field_value(data_object, 'identifiers','table', default=None)
key = get_field_value(data_object, 'identifiers','key', default='frame1')
dataset_label = get_field_value(data_object, 'identifiers','dataset_label', default=None)
#
columns = get_field_value(data_object, 'structure_parameters','columns', default=None)
index = get_field_value(data_object, 'structure_parameters','index', default=False)
header = get_field_value(data_object, 'structure_parameters','header', default=True)
partition_columns = get_field_value(data_object, 'structure_parameters','partition_columns', default=None)
orient = get_field_value(data_object, 'structure_parameters','orient', default= 'records')
#
dtypes = get_field_value(data_object,'format_parameters','dtypes', default=None)
date_columns = get_field_value(data_object,'format_parameters','date_columns', default=None)
separator = get_field_value(data_object,'format_parameters','separator', default=',')
line_break = get_field_value(data_object, 'format_parameters','line_break', default='\r\n')
quoting = get_field_value(data_object,'format_parameters','quoting', default='ALL')
encoding = get_field_value(data_object,'format_parameters','encoding', default='utf-8')
mode = get_field_value(data_object,'format_parameters','mode', default='a')
compression = get_field_value(data_object, 'format_parameters','compression', default='infer')
file_format = get_field_value(data_object, 'format_parameters','file_format', default='fixed')
# Execute Parameters ###
chunksize = get_field_value(execute_params, 'chunksize', default=None)
on_error = get_field_value(execute_params, 'on_error', default='ignore')
return_time = get_field_value(execute_params, 'return_time', default=False)
return_rowcount = get_field_value(execute_params, 'return_rowcount', default=False)
backend = get_field_value(execute_params, 'backend', default='pandas')
temp_file_path = get_field_value(execute_params, 'temp_file_path', default='')
params = get_field_value(execute_params, 'params', default=None)
# Read Parameters
start = get_field_value(execute_params, 'read_params', 'start', default=None)
stop = get_field_value(execute_params, 'read_params', 'stop', default=None)
# Write Parameters
if_exists = get_field_value(execute_params, 'write_params', 'if_exists', default='fail')
insertion_method = get_field_value(execute_params, 'write_params','insertion_method', default='multi')
complevel = get_field_value(execute_params, 'write_params', 'complevel', default=0)
checksum = get_field_value(execute_params, 'write_params', 'checksum', default=False)
protocol = get_field_value(execute_params, 'write_params', 'protocol', default=0)
    # keep the structure_parameters value extracted above when write_params does not override it
    partition_columns = get_field_value(execute_params, 'write_params', 'partition_columns', default=partition_columns)
if source_type == 'mssql':
write_data_mssql(
DataFrame,
server=server,
database=database,
schema=schema,
table=table,
index=index,
dtypes=dtypes,
if_exists=if_exists,
auth=auth,
insertion_method=insertion_method,
chunksize=chunksize,
driver=driver,
on_error=on_error,
return_time=return_time,
params=params
)
elif source_type == 'csv':
write_data_csv(
DataFrame=DataFrame,
file=file_path,
separator=separator,
index=index,
header=header,
columns=columns,
quoting=quoting,
encoding=encoding,
compression=compression,
chunksize=chunksize,
on_error=on_error,
return_time=return_time
)
elif source_type == 'pickle':
write_data_pickle(
DataFrame=DataFrame,
file=file_path,
compression=compression,
protocol=protocol
)
elif source_type == 'hdf':
write_data_hdf(
DataFrame=DataFrame,
file=file_path,
key=key,
mode=mode,
file_format=file_format,
columns=columns,
compression=compression,
complevel=complevel,
checksum=checksum,
if_exists=if_exists,
on_error=on_error
)
elif source_type == 'snowflake':
write_data_snowflake(
DataFrame=DataFrame,
server=server,
database=database,
auth=auth,
schema=schema,
table=table,
columns=columns,
warehouse=warehouse,
index=index,
if_exists=if_exists,
chunksize=chunksize,
dtypes=dtypes,
insertion_method=insertion_method,
temp_file_path=temp_file_path,
dataset_label=dataset_label,
backend=backend,
on_error=on_error,
return_time=return_time
)
    elif source_type == 'parquet':
        # 'engine' is not among the fields extracted above; assume an optional
        # format_parameters field and fall back to pandas' default of 'auto'
        engine = get_field_value(data_object, 'format_parameters', 'engine', default='auto')
        write_data_parquet(
            DataFrame=DataFrame,
            file=file_path,
            engine=engine,
            compression=compression,
            index=index,
            partition_columns=partition_columns,
            if_exists=if_exists,
            on_error=on_error
        )
else:
print('source_type {} not supported'.format(source_type))
return None
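# Illustrative usage sketch for write_data(): writing a DataFrame to parquet. The nested
# keys mirror the fields extracted above; the output path is a placeholder.
#
#   data_connector = {
#       'type': 'parquet',
#       'connect_parameters': {'file_path': 'out/events.parquet'},
#   }
#   data_object = {
#       'structure_parameters': {'index': False},
#       'format_parameters': {'compression': 'snappy'},
#   }
#   write_data(DataFrame=df, data_connector=data_connector, data_object=data_object,
#              execute_params={'on_error': 'strict'})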
def read_data_parquet(file, engine='auto', columns=None, on_error='strict'):
"""
    Reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_parquet.html
    Parameters
    ----------
    file : str
    engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
    columns : list, default None
    on_error : str, default 'strict' Ref: https://docs.python.org/3/library/functions.html#open
Returns
-------
DataFrame : pandas.DataFrame
"""
try:
start_time = timer()
DataFrame = pd.read_parquet(path=file, engine=engine, columns=columns)
execute_time = timer() - start_time
rowcount = len(DataFrame.index)
except:
execute_time = 0
rowcount = 0
print(traceback.format_exc())
DataFrame = pd.DataFrame()
print_execute_time(execute_time, task_name='read')
print_records_count(records_count=rowcount, task_name='read')
return DataFrame
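# Illustrative usage sketch (file and column names are placeholders): read a subset of
# columns from a parquet file.
#   events = read_data_parquet(file='data/events.parquet', engine='auto', columns=['id', 'value'])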
def write_data_parquet(DataFrame, file, engine='auto', compression='snappy', index=None, partition_columns=None, if_exists=None, on_error='strict'):
"""
    Reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_parquet.html
    Parameters
    ----------
    DataFrame : pandas.DataFrame
    file : str
    engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
    compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
    index : bool, default None
    partition_columns : list, default None
    if_exists : str, default None
    on_error : str, default 'strict' Ref: https://docs.python.org/3/library/functions.html#open
Returns
-------
None
"""
    if compression == 'infer' or compression is None:
        compression = 'snappy'
    try:
        start_time = timer()
        # pass the path positionally: the keyword was 'fname' in older pandas and is 'path' in newer releases
        DataFrame.to_parquet(file, engine=engine, compression=compression, index=index, partition_cols=partition_columns)
execute_time = timer() - start_time
rowcount = len(DataFrame.index)
except:
execute_time = 0
rowcount = 0
print(traceback.format_exc())
print_execute_time(execute_time, task_name='write')
print_records_count(records_count=rowcount, task_name='write')
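# Illustrative usage sketch (path and partition column are placeholders): write a
# partitioned parquet dataset with the default snappy codec.
#   write_data_parquet(events, file='out/events.parquet', compression='snappy', index=False,
#                      partition_columns=['year'])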
def read_data_hdf(file, key='frame', mode='a', start=None, stop=None, columns=None, on_error='strict'):
"""
Reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_hdf.html
Parameters
----------
file : str
key : object, default 'frame'
mode : {'r', 'r+', 'a'} default 'a'
start : int, default None
stop : int, default None
columns : list, default None
    on_error : str, default 'strict' Ref: https://docs.python.org/3/library/functions.html#open
Returns
-------
DataFrame : pandas.DataFrame
"""
try:
start_time = timer()
DataFrame = pd.read_hdf(path_or_buf=file, key=key, mode=mode, start=start, stop=stop, columns=columns, errors=on_error)
execute_time = timer() - start_time
rowcount = len(DataFrame.index)
except:
execute_time = 0
rowcount = 0
print(traceback.format_exc())
DataFrame = pd.DataFrame()
print_execute_time(execute_time, task_name='read')
print_records_count(records_count=rowcount, task_name='read')
return DataFrame
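# Illustrative usage sketch (store path and key are placeholders): read the first 1000
# rows of a stored frame without loading the whole file.
#   frame = read_data_hdf(file='store.h5', key='frame1', mode='r', start=0, stop=1000)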
def write_data_hdf(DataFrame, file, key='frame', mode='a', file_format='fixed', columns=None, compression='zlib', complevel=0, checksum=False, if_exists=None, on_error='strict'):
"""
    Reference: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_hdf.html#pandas.DataFrame.to_hdf
Parameters
----------
DataFrame : pandas.DataFrame
file : str
key : object, default 'frame'
    mode : {'w', 'r+', 'a'}, default 'a'
format {'fixed', 'table'}, default 'fixed'
if_exists : str, default None
columns : list, default None
compression : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
complevel : int , default 0
checksum : bool, default False
    on_error : str, default 'strict' Ref: https://docs.python.org/3/library/functions.html#open
Returns
-------
None
"""
if if_exists == 'append':
append = True
else:
append = False
    if compression == 'infer' or compression is None:
        compression = 'zlib'
    try:
        start_time = timer()
        # pandas.DataFrame.to_hdf() has no 'if_exists', 'checksum' or 'on_error' keywords:
        # 'if_exists' maps onto 'append' (computed above) and 'on_error' onto 'errors';
        # 'checksum' (fletcher32) is an HDFStore option that to_hdf does not expose directly.
        DataFrame.to_hdf(path_or_buf=file, key=key, mode=mode, format=file_format, append=append,
                         data_columns=columns, complevel=complevel, complib=compression, errors=on_error)
execute_time = timer() - start_time
rowcount = len(DataFrame.index)
except:
execute_time = 0
rowcount = 0
print(traceback.format_exc())
print_execute_time(execute_time, task_name='write')
print_records_count(records_count=rowcount, task_name='write')
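# Illustrative usage sketch (store path and key are placeholders): append to a
# table-format HDF5 store with blosc compression.
#   write_data_hdf(frame, file='store.h5', key='frame1', mode='a', file_format='table',
#                  compression='blosc', complevel=5, if_exists='append')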
def read_data_pickle(file, compression='infer'):
"""
https://docs.python.org/3/library/pickle.html
"Warning The pickle module is not secure against erroneous or maliciously constructed data.
Never unpickle data received from an untrusted or unauthenticated source."
Parameters
----------
file : str
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
Returns
-------
DataFrame : pandas.DataFrame
"""
try:
start_time = timer()
DataFrame = pd.read_pickle(path=file, compression=compression)
execute_time = timer() - start_time
rowcount = len(DataFrame.index)
except:
execute_time = 0
rowcount = 0
print(traceback.format_exc())
        DataFrame = pd.DataFrame()
    print_execute_time(execute_time, task_name='read')
    print_records_count(records_count=rowcount, task_name='read')
    return DataFrame
#!/usr/bin/python
import pandas as pd
import collections
import os
import numpy as np
import argparse
import dendropy
import copy
import re
from Bio import SeqIO
parser = argparse.ArgumentParser(description='test')
parser.add_argument('-o', '--output-dir', type=str)
parser.add_argument('-a', '--accessory-dir', type=str)
parser.add_argument('-p', '--prefix', type=str)
args = parser.parse_args()
query_dir = args.output_dir
accessory_dir = args.accessory_dir
prefix = args.prefix
tree = dendropy.Tree.get(path=f'{accessory_dir}/wol.nwk', schema='newick')
backbone_leaves = [a.taxon.label for a in tree.leaf_nodes()]
dis_mat = pd.DataFrame(index=None, columns=backbone_leaves)
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
)
import pandas._testing as tm
dt_data = [
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timestamp("2011-01-03"),
]
tz_data = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
pd.Timestamp("2011-01-03", tz="US/Eastern"),
]
td_data = [
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
]
period_data = [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2011-03", freq="M"),
]
data_dict = {
"bool": [True, False, True],
"int64": [1, 2, 3],
"float64": [1.1, np.nan, 3.3],
"category": Categorical(["X", "Y", "Z"]),
"object": ["a", "b", "c"],
"datetime64[ns]": dt_data,
"datetime64[ns, US/Eastern]": tz_data,
"timedelta64[ns]": td_data,
"period[M]": period_data,
}
class TestConcatAppendCommon:
"""
Test common dtype coercion rules between concat and append.
"""
@pytest.fixture(params=sorted(data_dict.keys()))
def item(self, request):
key = request.param
return key, data_dict[key]
item2 = item
def _check_expected_dtype(self, obj, label):
"""
Check whether obj has expected dtype depending on label
considering not-supported dtypes
"""
if isinstance(obj, Index):
assert obj.dtype == label
elif isinstance(obj, Series):
if label.startswith("period"):
assert obj.dtype == "Period[M]"
else:
assert obj.dtype == label
else:
raise ValueError
def test_dtypes(self, item):
# to confirm test case covers intended dtypes
typ, vals = item
self._check_expected_dtype(Index(vals), typ)
self._check_expected_dtype(Series(vals), typ)
def test_concatlike_same_dtypes(self, item):
# GH 13660
typ1, vals1 = item
vals2 = vals1
vals3 = vals1
if typ1 == "category":
exp_data = Categorical(list(vals1) + list(vals2))
exp_data3 = Categorical(list(vals1) + list(vals2) + list(vals3))
else:
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3)
tm.assert_index_equal(res, exp)
# index.append name mismatch
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="y")
res = i1.append(i2)
exp = Index(exp_data)
tm.assert_index_equal(res, exp)
# index.append name match
i1 = Index(vals1, name="x")
i2 = Index(vals2, name="x")
res = i1.append(i2)
exp = Index(exp_data, name="x")
tm.assert_index_equal(res, exp)
# cannot append non-index
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append(vals2)
with pytest.raises(TypeError, match="all inputs must be Index"):
Index(vals1).append([Index(vals2), vals3])
# ----- Series ----- #
# series.append
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
res = Series(vals1)._append([Series(vals2), Series(vals3)], ignore_index=True)
exp = Series(exp_data3)
tm.assert_series_equal(res, exp)
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
# name mismatch
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="y")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data)
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# name match
s1 = Series(vals1, name="x")
s2 = Series(vals2, name="x")
res = s1._append(s2, ignore_index=True)
exp = Series(exp_data, name="x")
tm.assert_series_equal(res, exp, check_index_type=True)
res = pd.concat([s1, s2], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# cannot append non-index
msg = (
r"cannot concatenate object of type '.+'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append(vals2)
with pytest.raises(TypeError, match=msg):
Series(vals1)._append([Series(vals2), vals3])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), vals2])
with pytest.raises(TypeError, match=msg):
pd.concat([Series(vals1), Series(vals2), vals3])
def test_concatlike_dtypes_coercion(self, item, item2, request):
# GH 13660
typ1, vals1 = item
typ2, vals2 = item2
vals3 = vals2
# basically infer
exp_index_dtype = None
exp_series_dtype = None
if typ1 == typ2:
# same dtype is tested in test_concatlike_same_dtypes
return
elif typ1 == "category" or typ2 == "category":
# The `vals1 + vals2` below fails bc one of these is a Categorical
# instead of a list; we have separate dedicated tests for categorical
return
warn = None
# specify expected dtype
if typ1 == "bool" and typ2 in ("int64", "float64"):
# series coerces to numeric based on numpy rule
# index doesn't because bool is object dtype
exp_series_dtype = typ2
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif typ2 == "bool" and typ1 in ("int64", "float64"):
exp_series_dtype = typ1
mark = pytest.mark.xfail(reason="GH#39187 casting to object")
request.node.add_marker(mark)
warn = FutureWarning
elif (
typ1 == "datetime64[ns, US/Eastern]"
or typ2 == "datetime64[ns, US/Eastern]"
or typ1 == "timedelta64[ns]"
or typ2 == "timedelta64[ns]"
):
exp_index_dtype = object
exp_series_dtype = object
exp_data = vals1 + vals2
exp_data3 = vals1 + vals2 + vals3
# ----- Index ----- #
# index.append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Index(vals1).append(Index(vals2))
exp = Index(exp_data, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# 3 elements
res = Index(vals1).append([Index(vals2), Index(vals3)])
exp = Index(exp_data3, dtype=exp_index_dtype)
tm.assert_index_equal(res, exp)
# ----- Series ----- #
# series._append
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(Series(vals2), ignore_index=True)
exp = Series(exp_data, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp, check_index_type=True)
# concat
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat([Series(vals1), Series(vals2)], ignore_index=True)
tm.assert_series_equal(res, exp, check_index_type=True)
# 3 elements
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = Series(vals1)._append(
[Series(vals2), Series(vals3)], ignore_index=True
)
exp = Series(exp_data3, dtype=exp_series_dtype)
tm.assert_series_equal(res, exp)
with tm.assert_produces_warning(warn, match="concatenating bool-dtype"):
# GH#39817
res = pd.concat(
[Series(vals1), Series(vals2), Series(vals3)],
ignore_index=True,
)
tm.assert_series_equal(res, exp)
def test_concatlike_common_coerce_to_pandas_object(self):
# GH 13626
# result must be Timestamp/Timedelta, not datetime.datetime/timedelta
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"])
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Timestamp("2011-01-01"),
pd.Timestamp("2011-01-02"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
]
)
res = dti.append(tdi)
tm.assert_index_equal(res, exp)
assert isinstance(res[0], pd.Timestamp)
assert isinstance(res[-1], pd.Timedelta)
dts = Series(dti)
tds = Series(tdi)
res = dts._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
res = pd.concat([dts, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
assert isinstance(res.iloc[0], pd.Timestamp)
assert isinstance(res.iloc[-1], pd.Timedelta)
def test_concatlike_datetimetz(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 7795
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz=tz)
exp = pd.DatetimeIndex(
["2011-01-01", "2011-01-02", "2012-01-01", "2012-01-02"], tz=tz
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo", "EST5EDT"])
def test_concatlike_datetimetz_short(self, tz):
# GH#7795
ix1 = pd.date_range(start="2014-07-15", end="2014-07-17", freq="D", tz=tz)
ix2 = pd.DatetimeIndex(["2014-07-11", "2014-07-21"], tz=tz)
df1 = DataFrame(0, index=ix1, columns=["A", "B"])
df2 = DataFrame(0, index=ix2, columns=["A", "B"])
exp_idx = pd.DatetimeIndex(
["2014-07-15", "2014-07-16", "2014-07-17", "2014-07-11", "2014-07-21"],
tz=tz,
)
exp = DataFrame(0, index=exp_idx, columns=["A", "B"])
tm.assert_frame_equal(df1._append(df2), exp)
tm.assert_frame_equal(pd.concat([df1, df2]), exp)
def test_concatlike_datetimetz_to_object(self, tz_aware_fixture):
tz = tz_aware_fixture
# GH 13660
# different tz coerces to object
dti1 = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)
dti2 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"])
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01"),
pd.Timestamp("2012-01-02"),
],
dtype=object,
)
res = dti1.append(dti2)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts2 = Series(dti2)
res = dts1._append(dts2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# different tz
dti3 = pd.DatetimeIndex(["2012-01-01", "2012-01-02"], tz="US/Pacific")
exp = Index(
[
pd.Timestamp("2011-01-01", tz=tz),
pd.Timestamp("2011-01-02", tz=tz),
pd.Timestamp("2012-01-01", tz="US/Pacific"),
pd.Timestamp("2012-01-02", tz="US/Pacific"),
],
dtype=object,
)
res = dti1.append(dti3)
tm.assert_index_equal(res, exp)
dts1 = Series(dti1)
dts3 = Series(dti3)
res = dts1._append(dts3)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([dts1, dts3])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period(self):
# GH 13660
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01", "2012-02"], freq="M")
exp = pd.PeriodIndex(["2011-01", "2011-02", "2012-01", "2012-02"], freq="M")
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_diff_freq_to_object(self):
# GH 13221
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
pi2 = pd.PeriodIndex(["2012-01-01", "2012-02-01"], freq="D")
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Period("2012-01-01", freq="D"),
pd.Period("2012-02-01", freq="D"),
],
dtype=object,
)
res = pi1.append(pi2)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
ps2 = Series(pi2)
res = ps1._append(ps2)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, ps2])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concatlike_common_period_mixed_dt_to_object(self):
# GH 13221
# different datetimelike
pi1 = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
tdi = pd.TimedeltaIndex(["1 days", "2 days"])
exp = Index(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
],
dtype=object,
)
res = pi1.append(tdi)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = ps1._append(tds)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([ps1, tds])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
# inverse
exp = Index(
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Period("2011-01", freq="M"),
pd.Period("2011-02", freq="M"),
],
dtype=object,
)
res = tdi.append(pi1)
tm.assert_index_equal(res, exp)
ps1 = Series(pi1)
tds = Series(tdi)
res = tds._append(ps1)
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
res = pd.concat([tds, ps1])
tm.assert_series_equal(res, Series(exp, index=[0, 1, 0, 1]))
def test_concat_categorical(self):
# GH 13524
# same categories -> category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
exp = Series([1, 2, np.nan, 2, 1, 2], dtype="category")
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# partially different categories => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1], dtype="category")
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# completely different categories (same dtype) => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([np.nan, 1, 3, 2], dtype="category")
exp = Series([10, 11, np.nan, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
def test_union_categorical_same_categories_different_order(self):
# https://github.com/pandas-dev/pandas/issues/19096
a = Series(Categorical(["a", "b", "c"], categories=["a", "b", "c"]))
b = Series(Categorical(["a", "b", "c"], categories=["b", "a", "c"]))
result = pd.concat([a, b], ignore_index=True)
expected = Series(
Categorical(["a", "b", "c", "a", "b", "c"], categories=["a", "b", "c"])
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_coercion(self):
# GH 13524
# category + not-category => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2])
exp = Series([1, 2, np.nan, 2, 1, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
# result shouldn't be affected by 1st elem dtype
exp = Series([2, 1, 2, 1, 2, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# all values are not in category => not-category
s1 = Series([3, 2], dtype="category")
s2 = Series([2, 1])
exp = Series([3, 2, 2, 1])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([2, 1, 3, 2])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# completely different categories => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series([1, 3, 2])
exp = Series([10, 11, np.nan, 1, 3, 2], dtype=np.float64)
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([1, 3, 2, 10, 11, np.nan], dtype=np.float64)
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# different dtype => not-category
s1 = Series([10, 11, np.nan], dtype="category")
s2 = Series(["a", "b", "c"])
exp = Series([10, 11, np.nan, "a", "b", "c"])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series(["a", "b", "c", 10, 11, np.nan])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
# if normal series only contains NaN-likes => not-category
s1 = Series([10, 11], dtype="category")
s2 = Series([np.nan, np.nan, np.nan])
exp = Series([10, 11, np.nan, np.nan, np.nan])
tm.assert_series_equal(pd.concat([s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s1._append(s2, ignore_index=True), exp)
exp = Series([np.nan, np.nan, np.nan, 10, 11])
tm.assert_series_equal(pd.concat([s2, s1], ignore_index=True), exp)
tm.assert_series_equal(s2._append(s1, ignore_index=True), exp)
def test_concat_categorical_3elem_coercion(self):
# GH 13524
# mixed dtypes => not-category
s1 = Series([1, 2, np.nan], dtype="category")
s2 = Series([2, 1, 2], dtype="category")
s3 = Series([1, 2, 1, 2, np.nan])
exp = Series([1, 2, np.nan, 2, 1, 2, 1, 2, 1, 2, np.nan], dtype="float")
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([1, 2, 1, 2, np.nan, 1, 2, np.nan, 2, 1, 2], dtype="float")
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([1, 3, 4])
exp = Series([4, 5, 6, 1, 2, 3, 1, 3, 4])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([1, 3, 4, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
# values are all in either category => not-category
s1 = Series([4, 5, 6], dtype="category")
s2 = Series([1, 2, 3], dtype="category")
s3 = Series([10, 11, 12])
exp = Series([4, 5, 6, 1, 2, 3, 10, 11, 12])
tm.assert_series_equal(pd.concat([s1, s2, s3], ignore_index=True), exp)
tm.assert_series_equal(s1._append([s2, s3], ignore_index=True), exp)
exp = Series([10, 11, 12, 4, 5, 6, 1, 2, 3])
tm.assert_series_equal(pd.concat([s3, s1, s2], ignore_index=True), exp)
tm.assert_series_equal(s3._append([s1, s2], ignore_index=True), exp)
def test_concat_categorical_multi_coercion(self):
# GH 13524
        s1 = Series([1, 3], dtype="category")
#!/usr/bin/env python
import pandas as pd
import yfinance as yf
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', None)
df = None
ticker = yf.Ticker('AACQ')
expiration_dates = ticker.options
print(expiration_dates)
df1 = ticker.option_chain('2021-05-20').puts
df2 = ticker.option_chain('2021-06-17').puts
print(df2)
df = pd.DataFrame()
df = pd.concat([df, df1], axis=0)
df = pd.concat([df, df2], axis=0)
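# Illustrative follow-up sketch: filter the combined puts by a hypothetical strike
# threshold, assuming the usual yfinance option-chain columns ('contractSymbol',
# 'strike', 'bid', 'ask'). The threshold value is a placeholder.
# target_strike = 10.0
# cheap_puts = df[df['strike'] <= target_strike].sort_values('bid', ascending=False)
# print(cheap_puts[['contractSymbol', 'strike', 'bid', 'ask']])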
from data_get import *
from baseline_functions import *
from calendar_date import *
import global_vars
global_vars.init()
if global_vars.GRAPHFLAG > 0:
from graph_functions import *
from error_graphs import *
import mysql.connector
import pandas as pd
import datetime
import time
# main()
# This function goes through each SAID in the SAID_TABLE sql table, retrieves its data and runs baselinining methods on them
def main():
# Connect to table that connects SAIDs
cnx = mysql.connector.connect(user=global_vars.DATABASE_USERNAME, password=global_vars.DATABASE_PASSWORD,
host=global_vars.DATABASE_IP_RO,
database=global_vars.DATABASE_NAME)
cursor = cnx.cursor()
# return
if global_vars.INTERVALFLAG == 0:
query = "SELECT * FROM SAID_TABLE_DR_15 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
interval = 15
dbnm = 'MIN15'
elif global_vars.INTERVALFLAG == 1:
query = "SELECT * FROM SAID_TABLE_DR_60 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
interval = 60
dbnm = 'MIN60'
    elif global_vars.INTERVALFLAG == 2:
        query = "SELECT * FROM SAID_TABLE_NONDR_15 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
        # assumed mapping (otherwise 'interval' and 'dbnm' are undefined below): non-DR 15-minute accounts use MIN15
        interval = 15
        dbnm = 'MIN15'
    elif global_vars.INTERVALFLAG == 3:
        query = "SELECT * FROM SAID_TABLE_NONDR_60 LIMIT %i OFFSET %i" %(global_vars.SAID_LIMIT, global_vars.SAID_OFFSET)
        # assumed mapping (otherwise 'interval' and 'dbnm' are undefined below): non-DR 60-minute accounts use MIN60
        interval = 60
        dbnm = 'MIN60'
cursor.execute(query)
said_counter = 0
tic = time.time()
said_packet = []
said_pack_count = 0
PACK_SIZE = 25
storage_df_columns = global_vars.storage_df_columns
pack_bank = []
# Go through each SAID
for row in cursor:
SAID = str(row[0]).zfill(10)
said_packet.append(SAID)
said_pack_count += 1
if said_pack_count%PACK_SIZE != 0:
continue
pack_bank.append(said_packet)
said_packet = []
if len(pack_bank)*PACK_SIZE > 1000:
# print(pack_bank)
break
else:
continue
said_pack_count = 0
for said_packet in pack_bank:
said_pack_count += PACK_SIZE
tic_packet = time.time()
print("packet",said_packet)
packet_string = '('+str(said_packet)[1:-1]+')'
try:
cnx = mysql.connector.connect(user=global_vars.DATABASE_USERNAME, password=global_vars.DATABASE_PASSWORD,
host=global_vars.DATABASE_IP_RO,
database=global_vars.DATABASE_NAME)
tic_query = time.time()
query = "SELECT * FROM %s WHERE SA in %s" %(dbnm, packet_string)
all_interval_df = pd.read_sql_query(query,cnx)
all_interval_df['DATE'] = pd.to_datetime(all_interval_df['DATE'])
toc_query = time.time()
print("All interval df shape:", all_interval_df.shape, "- time:", toc_query-tic_query)
except:
print("Interval_df error")
said_packet = []
continue
# storage_df = pd.DataFrame(columns=storage_df_columns)
nonres_storage_df = pd.DataFrame(columns=storage_df_columns)
pdp_storage_df = pd.DataFrame(columns=storage_df_columns)
cbp_storage_df = pd.DataFrame(columns=storage_df_columns)
bip_storage_df = pd.DataFrame(columns=storage_df_columns)
amp_storage_df = pd.DataFrame(columns=storage_df_columns)
res_storage_df = pd.DataFrame(columns=storage_df_columns)
smartrate_storage_df = pd.DataFrame(columns=storage_df_columns)
smartac_storage_df = pd.DataFrame(columns=storage_df_columns)
for SAID in said_packet:
tic_said = time.time()
try:
(program, NAICS, weather) = getInfo(SAID)
except Exception as e:
print("getInfo error")
continue
print(SAID, program)
try:
# Find DR days of SAID
DRDays = getDR(SAID, program)
except:
print("DRDays error")
continue
try:
# Find temperature for SAID
temp_df = getTemp(weather)
except:
print("temp_df error")
continue
# storage_list is used to have all relevant information for a single SAID in a single Date
# storage_list = [SAID, program, NAICS, date, max_temp...]
storage_list = [int(SAID), program, NAICS]
# will contain all storage lists for 1 said, so approx 731 rows
said_all_data = []
interval_df = all_interval_df.loc[all_interval_df['SA'] == SAID]
print(interval_df.shape)
# date is initialized to last day and will backtrack through every day
date = (interval_df['DATE'].max()).date()
while date.year > 2015:
# row_data = runFrequentBaseline(interval_df, DRDays, temp_df, interval, date, storage_list)
                try:
                    row_data = runBaseline2(interval_df, DRDays, temp_df, interval, date, storage_list)
                except Exception as e:
                    print("row_data error")
                    # step back a day before continuing, otherwise a failing day loops forever
                    date = date - datetime.timedelta(days=1)
                    continue
                if row_data != 'NA':
                    said_all_data.append(row_data)
                date = date - datetime.timedelta(days=1)
said_df = pd.DataFrame(said_all_data, columns=storage_df_columns)
print("said_df shape:", said_df.shape)
# frames = [storage_df, said_df]
# storage_df = pd.concat(frames)
if program == 'PDP':
frames = [pdp_storage_df, said_df]
pdp_storage_df = pd.concat(frames)
frames = [nonres_storage_df, said_df]
nonres_storage_df = pd.concat(frames)
print("Nonres Shape", nonres_storage_df.shape)
elif program == 'AMP':
frames = [amp_storage_df, said_df]
                amp_storage_df = pd.concat(frames)
import os
from pathlib import Path
import numpy as np
from flask import Flask, request, jsonify, render_template
from joblib import load
import pandas as pd
def create_df(dicti):
transformed = {}
for feature in dicti:
if feature == 'MarketSegment':
if dicti[feature] == 'Online TA':
transformed['MarketSegment_Online TA']=1
else:
transformed['MarketSegment_Online TA']=0
elif feature == 'CustomerType':
if dicti[feature] == 'Transient':
transformed['CustomerType_Transient']=1
transformed['CustomerType_Transient-Party']=0
elif dicti[feature] == 'Transient-Party':
transformed['CustomerType_Transient']=0
transformed['CustomerType_Transient-Party']=1
else:
transformed['CustomerType_Transient']=0
transformed['CustomerType_Transient-Party']=0
elif feature == 'DepositType':
if dicti[feature] == 'Non Refund':
transformed['DepositType_Non Refund']=1
transformed['DepositType_No Deposit']=0
elif dicti[feature] == 'No Deposit':
transformed['DepositType_Non Refund']=0
transformed['DepositType_No Deposit']=1
else:
transformed['DepositType_Non Refund']=0
transformed['DepositType_No Deposit']=0
else:
transformed[feature]=dicti[feature]
rearrange_cols = ['LeadTime', 'TotalOfSpecialRequests', 'StaysInWeekNights', 'ADR',
'ArrivalDateWeekNumber', 'Agent', 'ReservedRoomType',
'DepositType_No Deposit', 'BookingChanges', 'AssignedRoomType',
'Country', 'PreviousCancellations', 'Company', 'DistributionChannel',
'DepositType_Non Refund', 'MarketSegment_Online TA',
'ArrivalDateDayOfMonth', 'ArrivalDateMonth', 'CustomerType_Transient',
'CustomerType_Transient-Party', 'RequiredCarParkingSpaces',
'DaysInWaitingList', 'StaysInWeekendNights',
'PreviousBookingsNotCanceled', 'Adults']
    new_obv_df = pd.DataFrame(transformed, index=[0])
import sys, os
import unittest
import pandas as pd
import numpy
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer, MinMaxScaler, MaxAbsScaler, RobustScaler,\
Binarizer, PolynomialFeatures, OneHotEncoder, KBinsDiscretizer
from sklearn_pandas import CategoricalImputer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn_pandas import DataFrameMapper
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier,\
RandomForestRegressor, IsolationForest
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
from nyoka.preprocessing import Lag
from nyoka import skl_to_pmml
from nyoka import PMML44 as pml
class TestMethods(unittest.TestCase):
def test_sklearn_01(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_pmml.pmml"
model = SVC()
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
## 1
svms = pmml_obj.SupportVectorMachineModel[0].SupportVectorMachine
for mod_val, recon_val in zip(model.intercept_, svms):
self.assertEqual("{:.16f}".format(mod_val), "{:.16f}".format(recon_val.Coefficients.absoluteValue))
## 2
svm = pmml_obj.SupportVectorMachineModel[0]
self.assertEqual(svm.RadialBasisKernelType.gamma,model._gamma)
def test_sklearn_02(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "knn_pmml.pmml"
pipeline_obj = Pipeline([
('scaling',StandardScaler()),
('knn',KNeighborsClassifier(n_neighbors = 5))
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertIsNotNone(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.euclidean)
##2
self.assertEqual(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.kind, "distance")
##3
self.assertEqual(pipeline_obj.steps[-1][-1].n_neighbors, pmml_obj.NearestNeighborModel[0].numberOfNeighbors)
def test_sklearn_03(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "rf_pmml.pmml"
model = RandomForestClassifier(n_estimators = 100)
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("rfc", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
self.assertEqual(model.n_estimators,pmml_obj.MiningModel[0].Segmentation.Segment.__len__())
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "majorityVote")
def test_sklearn_04(self):
titanic = pd.read_csv("nyoka/tests/titanic_train.csv")
features = titanic.columns
target = 'Survived'
f_name = "gb_pmml.pmml"
pipeline_obj = Pipeline([
("imp", Imputer(strategy="median")),
("gbc", GradientBoostingClassifier(n_estimators = 10))
])
pipeline_obj.fit(titanic[features],titanic[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "modelChain")
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment.__len__(), 2)
##3
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment[1].RegressionModel.normalizationMethod, "logit")
def test_sklearn_05(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg'],axis=1)
y = df['mpg']
features = [name for name in df.columns if name not in ('mpg')]
target = 'mpg'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', TfidfVectorizer())
])),
('model',DecisionTreeRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"dtr_pmml.pmml")
self.assertEqual(os.path.isfile("dtr_pmml.pmml"),True)
def test_sklearn_06(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
f_name = "linearregression_pmml.pmml"
model = LinearRegression()
pipeline_obj = Pipeline([
('model',model)
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name, True)
## 1
reg_tab = pmml_obj.RegressionModel[0].RegressionTable[0]
self.assertEqual(reg_tab.intercept,model.intercept_)
## 2
for model_val, pmml_val in zip(model.coef_, reg_tab.NumericPredictor):
self.assertEqual("{:.16f}".format(model_val),"{:.16f}".format(pmml_val.coefficient))
def test_sklearn_07(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "logisticregression_pmml.pmml"
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("lr", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
segmentation = pmml_obj.MiningModel[0].Segmentation
self.assertEqual(segmentation.Segment.__len__(), model.classes_.__len__()+1)
## 2
self.assertEqual(segmentation.multipleModelMethod, "modelChain")
##3
self.assertEqual(segmentation.Segment[-1].RegressionModel.normalizationMethod, "simplemax")
##4
for i in range(model.classes_.__len__()):
self.assertEqual(segmentation.Segment[i].RegressionModel.normalizationMethod, "logit")
self.assertEqual("{:.16f}".format(model.intercept_[i]),\
"{:.16f}".format(segmentation.Segment[i].RegressionModel.RegressionTable[0].intercept))
def test_sklearn_08(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = [i%2 for i in range(iris.data.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('pca',PCA(2)),
('mod',LogisticRegression())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "logisticregression_pca_pmml.pmml")
self.assertEqual(os.path.isfile("logisticregression_pca_pmml.pmml"),True)
def test_sklearn_09(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdclassifier_pmml.pmml")
self.assertEqual(os.path.isfile("sgdclassifier_pmml.pmml"),True)
def test_sklearn_10(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("lsvc", LinearSVC())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "linearsvc_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvc_pmml.pmml"),True)
def test_sklearn_11(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',LinearSVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"linearsvr_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvr_pmml.pmml"),True)
def test_sklearn_12(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',GradientBoostingRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"gbr.pmml")
self.assertEqual(os.path.isfile("gbr.pmml"),True)
def test_sklearn_13(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", DecisionTreeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "dtr_clf.pmml")
self.assertEqual(os.path.isfile("dtr_clf.pmml"),True)
def test_sklearn_14(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',RandomForestRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"rfr.pmml")
self.assertEqual(os.path.isfile("rfr.pmml"),True)
def test_sklearn_15(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',KNeighborsRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"knnr.pmml")
self.assertEqual(os.path.isfile("knnr.pmml"),True)
def test_sklearn_16(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',SVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"svr.pmml")
self.assertEqual(os.path.isfile("svr.pmml"),True)
def test_sklearn_17(self):
irisdata = datasets.load_iris()
iris = pd.DataFrame(irisdata.data,columns=irisdata.feature_names)
iris['Species'] = irisdata.target
feature_names = iris.columns.drop('Species')
X = iris[iris.columns.drop(['Species'])]
pipeline_obj = Pipeline([
('standard_scaler',StandardScaler()),
('Imputer',Imputer()),
('model',OneClassSVM())
])
pipeline_obj.fit(X)
skl_to_pmml(pipeline_obj, feature_names, pmml_f_name="one_class_svm.pmml")
self.assertEqual(os.path.isfile("one_class_svm.pmml"),True)
def test_sklearn_18(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", GaussianNB())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "gnb.pmml")
self.assertEqual(os.path.isfile("gnb.pmml"),True)
def test_sklearn_19(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdc.pmml")
self.assertEqual(os.path.isfile("sgdc.pmml"),True)
def test_sklearn_20(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", RidgeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "ridge.pmml")
self.assertEqual(os.path.isfile("ridge.pmml"),True)
def test_sklearn_21(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", LinearDiscriminantAnalysis())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "lda.pmml")
self.assertEqual(os.path.isfile("lda.pmml"),True)
def test_sklearn_22(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
("scaler",Binarizer(threshold=2)),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "binarizer.pmml")
self.assertEqual(os.path.isfile("binarizer.pmml"),True)
def test_sklearn_23(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
("scaler",MinMaxScaler()),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "minmax.pmml")
self.assertEqual(os.path.isfile("minmax.pmml"),True)
def test_sklearn_24(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
("scaler",RobustScaler()),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "robust.pmml")
self.assertEqual(os.path.isfile("robust.pmml"),True)
def test_sklearn_25(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
("scaler",MaxAbsScaler()),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "maxabs.pmml")
self.assertEqual(os.path.isfile("maxabs.pmml"),True)
def test_sklearn_25_(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
irisd['new'] = [i%3 for i in range(iris.data.shape[0])]
irisd.to_csv("test_new.csv",index=False)
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapper", DataFrameMapper([
(["new"], LabelBinarizer()),
(iris.feature_names, None)
])),
('scale', StandardScaler()),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "labelbinarizer.pmml")
self.assertEqual(os.path.isfile("labelbinarizer.pmml"),True)
def test_sklearn_26(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
irisd['new'] = [i%3 for i in range(iris.data.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
model = LinearRegression()
pipeline_obj = Pipeline([
("mapper", DataFrameMapper([
(["new"], OneHotEncoder(categories='auto'))
])),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "onehot.pmml")
self.assertEqual(os.path.isfile("onehot.pmml"),True)
def test_sklearn_27(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['origin'],axis=1)
y = df['origin']
features = [name for name in df.columns if name not in ('origin')]
target = 'origin'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', CountVectorizer())
])),
('model',LogisticRegression())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"countvec.pmml")
self.assertEqual(os.path.isfile("countvec.pmml"),True)
def test_sklearn_28(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
irisd['new'] = [i%3 for i in range(iris.data.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapper", DataFrameMapper([
(["new"], PolynomialFeatures())
])),
("model", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "polyfeat.pmml")
self.assertEqual(os.path.isfile("polyfeat.pmml"),True)
def test_sklearn_29(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
target = 'Species'
features = irisd.columns.drop('Species')
model = MLPClassifier()
pipe = Pipeline([
('lag', Lag(aggregation="stddev", value=3)),
('model',model)
])
pipe.fit(irisd[features], irisd[target])
        file_name = 'mlp_model_multi_class_classification.pmml'
skl_to_pmml(pipe, iris.feature_names, target,file_name)
self.assertEqual(os.path.isfile(file_name),True)
def test_sklearn_30(self):
iris = datasets.load_iris()
model = KMeans()
pipe = Pipeline([
('model',model)
])
pipe.fit(iris.data)
file_name = 'kmeans_model.pmml'
skl_to_pmml(pipe, iris.feature_names, 'target',file_name)
self.assertEqual(os.path.isfile(file_name),True)
def test_sklearn_31(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
target = 'Species'
features = irisd.columns.drop('Species')
model = GradientBoostingClassifier()
pipe = Pipeline([
('scaler', MaxAbsScaler()),
('model',model)
])
pipe.fit(irisd[features], irisd[target])
        file_name = 'gbc_model_multi_class_classification.pmml'
skl_to_pmml(pipe, iris.feature_names, target,file_name)
self.assertEqual(os.path.isfile(file_name),True)
def test_sklearn_32(self):
from sklearn.neural_network import MLPClassifier
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['target'] = [i%2 for i in range(iris.data.shape[0])]
target = 'target'
features = irisd.columns.drop('target')
model = MLPClassifier()
pipe = Pipeline([
('lag', Lag(aggregation="sum", value=3)),
('model',model)
])
pipe.fit(irisd[features], irisd[target])
file_name = 'mlp_model_binary_class_classification.pmml'
skl_to_pmml(pipe, iris.feature_names, target,file_name)
self.assertEqual(os.path.isfile(file_name),True)
def test_sklearn_33(self):
irisdata = datasets.load_iris()
iris = pd.DataFrame(irisdata.data,columns=irisdata.feature_names)
iris['Species'] = irisdata.target
feature_names = iris.columns.drop('Species')
X = iris[iris.columns.drop(['Species'])]
pipeline_obj = Pipeline([
('standard_scaler',StandardScaler()),
('Imputer',Imputer()),
('model',IsolationForest())
])
pipeline_obj.fit(X)
skl_to_pmml(pipeline_obj, feature_names, pmml_f_name="iforest.pmml")
self.assertEqual(os.path.isfile("iforest.pmml"),True)
def test_sklearn_34(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['new'] = [i%2 for i in range(irisd.shape[0])]
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
model = LogisticRegression()
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('new', LabelBinarizer())
])),
('model',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,"lb_two.pmml")
self.assertEqual(os.path.isfile("lb_two.pmml"),True)
def test_sklearn_37(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
target = 'Species'
features = irisd.columns.drop('Species')
model = LogisticRegression()
pipeline_obj = Pipeline([
('new', StandardScaler()),
('imputer', Imputer()),
('model',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,"imputer.pmml")
self.assertEqual(os.path.isfile("imputer.pmml"),True)
def test_sklearn_38(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
target = 'Species'
features = irisd.columns.drop('Species')
model = LogisticRegression()
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
(['sepal length (cm)'], KBinsDiscretizer()),
])),
('model',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
with self.assertRaises(TypeError):
skl_to_pmml(pipeline_obj,features,target,"kbins.pmml")
def test_sklearn_39(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
target = 'Species'
features = irisd.columns.drop('Species')
model = GaussianProcessClassifier()
pipeline_obj = Pipeline([
('model',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
with self.assertRaises(NotImplementedError):
skl_to_pmml(pipeline_obj,numpy.array(features),target,"gpc.pmml")
def test_sklearn_40(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
target = 'Species'
features = irisd.columns.drop('Species')
model = GaussianProcessClassifier()
model.fit(irisd[features],irisd[target])
with self.assertRaises(TypeError):
skl_to_pmml(model,features,target,"no_pipeline.pmml")
def test_sklearn_41(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_linear.pmml"
model = SVC(kernel='linear')
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
self.assertEqual(os.path.isfile(f_name),True)
def test_sklearn_42(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_poly.pmml"
model = SVC(kernel='poly')
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
self.assertEqual(os.path.isfile(f_name),True)
def test_sklearn_44(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = [i%2 for i in range(irisd.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_bin.pmml"
model = SVC()
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
self.assertEqual(os.path.isfile(f_name),True)
def test_sklearn_45(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg'],axis=1)
y = df['mpg']
features = [name for name in df.columns if name != 'mpg']
target = 'mpg'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', TfidfVectorizer())
])),
('model',MLPRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"mlpr_pmml.pmml")
self.assertEqual(os.path.isfile("mlpr_pmml.pmml"),True)
def test_sklearn_46(self):
iris = datasets.load_iris()
irisd = | pd.DataFrame(iris.data, columns=iris.feature_names) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 16:41:37 2018
@author: krzysztof
This module contains utilities useful when performing data analysis and drug sensitivity prediction with the
Genomics of Drug Sensitivity in Cancer (GDSC) database.
The main utilities are the Drug classes and the Experiment class. All classes beginning with the word "Drug" represent a compound
coming from GDSC. There is a separate class for every corresponding experiment setup and genomic feature space. All Drug
classes contain methods for extraction and storage of proper input data. Available data types include: gene expression,
binary copy number and coding variants, and cell line tissue type. The set of considered genes is represented as the "targets"
attribute of the Drug classes.
The Experiment class is dedicated to the storage and analysis of results coming from machine learning experiments. The actual
machine learning is done outside of the class. The Experiment class has methods for storage, analysis and visualisation
of results.
Classes:
Drug: Basic class representing a compound from GDSC.
DrugWithDrugBank: Inherits from Drug, accounts for target genes from DrugBank database.
DrugGenomeWide: Inherits from Drug, designed for using genome-wide gene exression as input data.
DrugDirectReactome: Inherits from DrugWithDrugBank, uses only input data related to target genes resulting
from direct compound-pathway matching from Reactome.
DrugWithGenesInSamePathways: Inherits from DrugWithDrugBank, uses only input data related to genes that belong in
the same pathways as target genes.
Experiment: Designed to store and analyze results coming from machine learning experiments.
"""
# Imports
import collections
import multiprocessing
import pickle
import sys
import time
import warnings

import dill
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy.stats import pearsonr

# Sklearn imports
from sklearn import feature_selection, metrics, model_selection, preprocessing
from sklearn.base import clone
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from stability_selection import StabilitySelection
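# Typical workflow with the classes below (a minimal sketch; see the individual classes for details):
#   1. Call Drug.load_mappings(...) once, so that gene names can be mapped to Ensembl IDs.
#   2. Load the GDSC tables with the appropriate load_data(...) static method.
#   3. Build a {DRUG_ID: Drug} dictionary with create_drugs(...).
#   4. For each drug, call create_full_data(...) to obtain a modelling-ready DataFrame.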
#################################################################################################################
# Drug class
#################################################################################################################
class Drug(object):
"""Class representing compound from GDSC database.
This is the most basic, parent class. Different experimental settings will use more specific,
children classes. Main function of the class is to create and store input data corresponding to a given
drug. Five types of data are considered: gene expression, copy number variants, coding variants, gene expression
signatures, and tumor tissue type. Class instances are initialized with four basic drug properties: ID, name, gene
targets and target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
ensembl targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_cnv_data_faster: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
extract_merck_signatures_data: Generate a DataFrame with gene expression signatures provided by Merck.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data and saves it in corresponding instance's
field.
return_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data but does not save it.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of Drug class objects, each referenced by its ID
(keys are drug GDSC IDs)
load_data: Load all needed data files as DataFrames with one function call.
"""
# Class variables
map_from_hgnc_to_ensembl = None
map_from_ensembl_to_hgnc = None
# Instance methods
def __init__(self, gdsc_id, name, targets, target_pathway):
"""Intiliaze the class instance with four basic attributes. "Targets" are gene names
and get mapped into Ensembl IDs using class mapping variable."""
self.gdsc_id = gdsc_id
self.name = name
self.targets = targets
self.target_pathway = target_pathway
self.ensembl_targets = []
for x in self.targets:
try:
self.ensembl_targets.append(self.map_from_hgnc_to_ensembl[x])
except KeyError:
pass
def extract_drug_response_data(self, sensitivity_profiles_df, metric="AUC"):
"""Generate a DataFrame containing reponses for every cell line screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
None
"""
df = sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id][
["COSMIC_ID", metric]]
df.columns = ["cell_line_id", metric] # Insert column with samples ID
self.total_no_samples_screened = df.shape[0] # Record how many screened cell lines for drug
self.response_data = df # Put DataFrame into corresponding field
def extract_screened_cell_lines(self, sensitivity_profiles_df):
"""Generate set of cell lines screened for that drug.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
Returns:
None
"""
self.screened_cell_lines = list(
sensitivity_profiles_df[sensitivity_profiles_df.DRUG_ID == self.gdsc_id]["COSMIC_ID"])
def extract_gene_expression(self, gene_expression_df):
"""Generate DataFrame of gene expression data for cell lines screened for this drug, only
considering drug's target genes.
Arguments:
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
Returns:
None
"""
cell_lines_str = [] # Gene expression DF column names are strings
for x in self.screened_cell_lines:
cell_lines_str.append(str(x))
cl_to_extract = []
for x in cell_lines_str:
if x in list(gene_expression_df.columns):
cl_to_extract.append(x) # Extract only cell lines contained in gene expression data
gene_expr = gene_expression_df[
gene_expression_df.ensembl_gene.isin(self.ensembl_targets)][["ensembl_gene"] + cl_to_extract]
gene_expr_t = gene_expr.transpose()
columns = list(gene_expr_t.loc["ensembl_gene"])
gene_expr_t.columns = columns
gene_expr_t = gene_expr_t.drop(["ensembl_gene"])
rows = list(gene_expr_t.index)
gene_expr_t.insert(0, "cell_line_id", rows) # Insert columns with cell line IDs
gene_expr_t.reset_index(drop=True, inplace=True)
gene_expr_t["cell_line_id"] = pd.to_numeric(gene_expr_t["cell_line_id"])
self.gene_expression_data = gene_expr_t # Put DataFrame into corresponding field
def extract_mutation_data(self, mutation_df):
"""Generate a DataFrame with binary mutation calls for screened cell lines and target genes.
Arguments:
mutation_df: DataFrame with original mutation calls from GDSC.
Returns:
None
"""
targets = [x + "_mut" for x in self.targets]
df = mutation_df.copy()[
mutation_df.cosmic_sample_id.isin(self.screened_cell_lines)]
df = df[df.genetic_feature.isin(targets)][["cosmic_sample_id", "genetic_feature", "is_mutated"]]
cosmic_ids = []
genetic_features = {}
for feature in df.genetic_feature.unique():
genetic_features[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
df_cl = df[df.cosmic_sample_id == cl_id]
for feature in genetic_features:
mutation_status = df_cl[
df_cl.genetic_feature == feature]["is_mutated"].iloc[0]
genetic_features[feature].append(mutation_status)
df1 = pd.DataFrame()
df1.insert(0, "cell_line_id", cosmic_ids) # Insert column with samples IDs
for feature in genetic_features:
df1[feature] = genetic_features[feature]
self.mutation_data = df1 # Put DataFrame into corresponding field
def extract_cnv_data(self, cnv_binary_df):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in df.genetic_feature.unique():
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = df[
(df.cosmic_sample_id == cl_id) & (df.genetic_feature == feature)]["is_mutated"].iloc[0]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
def extract_cnv_data_faster(self, cnv_binary_df, map_cl_id_and_feature_to_status):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Faster implementation than original "extract_cnv_data" by using mapping between genes and
genomic segments.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
map_cl_id_and_feature_to_status (dict): Mapping from (COSMIC ID, genetic feature) pairs to binary CNV status.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
for row in cnv_binary_df.drop_duplicates(subset="genetic_feature").itertuples():
feature_name = getattr(row, "genetic_feature")
genes_in_segment = getattr(row, "genes_in_segment").split(",")
for target in self.targets:
if target in genes_in_segment:
features_to_extract.append(feature_name) # If target is in any segment, add it to the list
features_to_extract = list(set(features_to_extract))
df = df[df.genetic_feature.isin(features_to_extract)]
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in features_to_extract:
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = map_cl_id_and_feature_to_status[(cl_id, feature)]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
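# Note: "map_cl_id_and_feature_to_status" is expected to map (COSMIC ID, genetic feature) pairs to
# binary CNV status. A minimal sketch of how such a lookup could be built from cnv_binary_df
# (assuming the same columns used above):
#   map_cl_id_and_feature_to_status = {
#       (row.cosmic_sample_id, row.genetic_feature): row.is_mutated
#       for row in cnv_binary_df.itertuples()}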
def extract_tissue_data(self, cell_line_list):
"""Generate (dummy encoded) data with cell line tissue type.
Arguments:
cell_line_list (DataFrame): Cell line list from GDSC.
Returns:
None
"""
df = cell_line_list[
cell_line_list["COSMIC_ID"].isin(self.screened_cell_lines)][["COSMIC_ID", "Tissue"]]
df.rename(columns={"COSMIC_ID": "cell_line_id"}, inplace=True)
self.tissue_data = pd.get_dummies(df, columns = ["Tissue"])
def extract_merck_signatures_data(self, signatures_df):
"""Generate data with gene expression signature scores for GDSC cell lines, provided by Merck.
Arguments:
signatures_df (DataFrame): DataFrame with gene signatures for cell lines.
Returns:
None
"""
# Compute list of screened cell lines as strings with prefix "X" in order to match
# signatures DataFrame columns
cell_lines_str = ["X" + str(cl) for cl in self.screened_cell_lines]
# Compute list of cell lines that are contained in signatures data
cls_to_extract = [cl for cl in cell_lines_str
if cl in list(signatures_df.columns)]
# Extract desired subset of signatures data
signatures_of_interest = signatures_df[cls_to_extract]
# Transpose the DataFrame
signatures_t = signatures_of_interest.transpose()
# Create a list of cell line IDs whose format matches rest of the data
cl_ids = pd.Series(signatures_t.index).apply(lambda x: int(x[1:]))
# Insert proper cell line IDs as a new column
signatures_t.insert(0, "cell_line_id", list(cl_ids))
# Drop the index and put computed DataFrame in an instance field
self.merck_signatures = signatures_t.reset_index(drop=True)
def concatenate_data(self, data_combination):
"""Generate data containing chosen combination of genetic data classes.
Arguments:
data_combination: List of strings containing data classes to be included. Available options are:
"mutation", "expression", "CNV", "tissue", "merck signatures".
Returns:
None
"""
# Create a list of DataFrames to include
objects = [self.response_data]
if "mutation" in data_combination and self.mutation_data.shape[0] > 0:
objects.append(self.mutation_data)
if "expression" in data_combination and self.gene_expression_data.shape[0] > 0:
objects.append(self.gene_expression_data)
if "CNV" in data_combination and self.cnv_data.shape[0] > 0:
objects.append(self.cnv_data)
if "tissue" in data_combination and self.tissue_data.shape[0] > 0:
objects.append(self.tissue_data)
if "merck signatures" in data_combination and self.merck_signatures.shape[0] > 0:
objects.append(self.merck_signatures)
# Find intersection in cell lines for all desirable DataFrames
cl_intersection = set(list(self.response_data["cell_line_id"]))
for obj in objects:
cl_intersection = cl_intersection.intersection(set(list(obj["cell_line_id"])))
objects_common = []
for obj in objects:
objects_common.append(obj[obj["cell_line_id"].isin(cl_intersection)])
# Check if all DataFrames have the same number of samples
no_samples = objects_common[0].shape[0]
for obj in objects_common:
assert obj.shape[0] == no_samples
obj.sort_values("cell_line_id", inplace=True)
obj.reset_index(drop=True, inplace=True)
cl_ids = objects_common[0]["cell_line_id"]
df_concatenated = pd.concat(objects_common, axis=1, ignore_index=False)
metric = self.response_data.columns[-1] # Extract the name of metric which was used for sensitivity
sensitivities = df_concatenated[metric]
df_concatenated = df_concatenated.drop(["cell_line_id", metric], axis=1)
df_concatenated.insert(0, "cell_line_id", cl_ids)
df_concatenated.insert(df_concatenated.shape[1], metric, sensitivities)
self.full_data = df_concatenated
def create_full_data(self, sensitivity_profiles_df, gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_line_list=None, mutation_df=None, merck_signatures_df=None,
data_combination=None, metric="AUC"):
"""Combine extraction methods in one to generate a DataFrame with desired data.
When calling a function, original DataFrames parsed should match strings in
data_combination argument. If any of the "_df" arguments is None (default value),
the corresponding data is not included in the output DataFrame.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data.
cell_line_list (DataFrame): Cell line list from GDSC.
mutation_df (DataFrame): DataFrame with original mutation calls from GDSC.
data_combination (list): list of strings containing data classes to be included. Available
options are: "mutation", "expression", "CNV, "tissue", "merck signatures".
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
DataFrame containing desired data for the drug
"""
# Call separate methods for distinct data types
self.extract_screened_cell_lines(sensitivity_profiles_df)
self.extract_drug_response_data(sensitivity_profiles_df, metric)
if type(gene_expression_df) == type(pd.DataFrame()):
self.extract_gene_expression(gene_expression_df)
if type(cnv_binary_df) == type(pd.DataFrame()):
self.extract_cnv_data_faster(cnv_binary_df, map_cl_id_and_feature_to_status)
if type(cell_line_list) == type(pd.DataFrame()):
self.extract_tissue_data(cell_line_list)
if type(mutation_df) == type(pd.DataFrame()):
self.extract_mutation_data(mutation_df)
if type(merck_signatures_df) == type(pd.DataFrame()):
self.extract_merck_signatures_data(merck_signatures_df)
self.concatenate_data(data_combination)
return self.full_data
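# Example call (a sketch; assumes the GDSC DataFrames have already been loaded, e.g. via load_data):
#   full_df = drug.create_full_data(drug_response_df, gene_expression_df=gene_expression_df,
#                                   mutation_df=coding_variants_df,
#                                   data_combination=["expression", "mutation"], metric="AUC")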
def return_full_data(self, sensitivity_profiles_df, gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_line_list=None, mutation_df=None, merck_signatures_df=None,
data_combination=None, metric="AUC"):
"""Compute full data with desired data classes and return it, but after that delete data from
instance's data fields in order to save memory.
When calling a function, original DataFrames parsed should match strings in
data_combination argument. If any of the "_df" arguments is None (default value),
the corresponding data is not included in the output DataFrame.
Arguments:
sensitivity_profiles_df (DataFrame): DataFrame of drug response data from GDSC.
gene_expression_df (DataFrame): Original GDSC gene expression DataFrame.
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data.
cell_line_list (DataFrame): Cell line list from GDSC.
mutation_df (DataFrame): DataFrame with original mutation calls from GDSC.
data_combination (list): list of strings containing data classes to be included. Available
options are: "mutation", "expression", "CNV, "tissue", "merck signatures".
metric (string): Which statistic to use as a response metric (default "AUC").
Returns:
DataFrame containing desired data for the drug
"""
full_df = self.create_full_data(sensitivity_profiles_df, gene_expression_df, cnv_binary_df,
map_cl_id_and_feature_to_status,
cell_line_list, mutation_df, merck_signatures_df,
data_combination, metric)
if type(gene_expression_df) == type(pd.DataFrame()):
self.gene_expression_data = None
if type(cnv_binary_df) == type(pd.DataFrame()):
self.cnv_data = None
if type(cell_line_list) == type(pd.DataFrame()):
self.tissue_data = None
if type(mutation_df) == type(pd.DataFrame()):
self.mutation_data = None
if type(merck_signatures_df) == type(pd.DataFrame()):
self.merck_signatures = None
self.full_data = None
return full_df
def __repr__(self):
"""Return string representation of an object, which can be used to create it."""
return 'Drug({}, "{}", {}, "{}")'.format(self.gdsc_id, self.name, self.targets, self.target_pathway)
def __str__(self):
"""Return string representation of an object"""
return "{} -- {}".format(self.name, self.gdsc_id)
# Class methods
@classmethod
def load_mappings(cls, filepath_hgnc_to_ensembl, filepath_ensembl_to_hgnc):
"""Load dictonaries with gene mappings between HGNC and Ensembl (from pickle files) and assign it
to corresponding class variables. Ensembl IDs are needed for gene expression data.
This method should be called on a Drug class before any other actions with the class.
Arguments:
filepath_hgnc_to_ensembl: file with accurate mapping
filepath_ensembl_to_hgnc: file with accurate mapping
Returns:
None
"""
cls.map_from_hgnc_to_ensembl = pickle.load(open(filepath_hgnc_to_ensembl, "rb"))
cls.map_from_ensembl_to_hgnc = pickle.load(open(filepath_ensembl_to_hgnc, "rb"))
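# Example (a sketch; the pickle file paths are placeholders):
#   Drug.load_mappings("mapping_from_hgnc_to_ensembl.p", "mapping_from_ensembl_to_hgnc.p")
# This has to be done once, before any Drug objects are created.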
# Static methods
@staticmethod
def create_drugs(drug_annotations_df):
"""Create a dictionary of Drug class objects, each referenced by it's ID (keys are drug GDSC ID's).
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website
Returns:
Dictionary of Drug objects as values and their ID's as keys
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
gdsc_id = getattr(row, "DRUG_ID")
name = getattr(row, "DRUG_NAME")
targets = getattr(row, "TARGET").split(", ")
target_pathway = getattr(row, "TARGET_PATHWAY")
drugs[gdsc_id] = Drug(gdsc_id, name, targets, target_pathway)
return drugs
@staticmethod
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response):
"""Load all needed files by calling one function and return data as tuple of DataFrames. All
argumenst are filepaths to corrresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
drug_response_df = pd.read_excel(drug_response)
return (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df, coding_variants_df,
drug_response_df)
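# Example of unpacking the tuple returned by Drug.load_data (a sketch; file paths are placeholders):
#   (drug_annotations_df, cell_lines_list_df, gene_expression_df,
#    cnv_binary_df, coding_variants_df, drug_response_df) = Drug.load_data(
#       "drug_annotations.xlsx", "cell_line_list.csv", "gene_expression.tsv",
#       "cnv1.csv", "cnv2.tsv", "coding_variants.csv", "drug_response.xlsx")
#   drugs = Drug.create_drugs(drug_annotations_df)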
#################################################################################################################
# DrugWithDrugBank class
##################################################################################################################
class DrugWithDrugBank(Drug):
"""Class representing drug from GDSC database.
Contrary to the parent class Drug, this class also incorporates data related to targets
derived from DrugBank, not only those from GDSC. Main function of the class is to create and store input data
corresponding to a given drug. Four types of data are considered: gene expression, copy number variants,
coding variants and tumor tissue type. Class instances are initialized with four basic drug properties:
ID, name, gene targets and target pathway. Data attributes are stored as pandas DataFrames and are filled
using data files from GDSC via corresponding methods.
In general, all utilities are the same as in parent Drug class, with an exception of "create_drugs"
method, which is overloaded in order to account for target genes data coming from DrugBank.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
ensembl targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of DrugWithDrugBank class objects, each referenced by its ID
(keys are drug GDSC IDs). Also includes target data coming from DrugBank.
load_data: Load all needed data files as DataFrames with one function call.
"""
@staticmethod
def create_drugs(drug_annotations_df, drugbank_targets_mapping):
"""Create a dictionary of DrugWithDrugBank class objects, each referenced by its ID. Also add
target data coming from DrugBank.
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website.
drugbank_targets_mapping (dictionary): Dictionary with mapping from drug ID to it's
targets from drugbank database.
Return:
Dictionary of DrugWithDrugBank objects as values and their ID's as keys.
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
name = getattr(row, "DRUG_NAME")
gdsc_id = getattr(row, "DRUG_ID")
targets = getattr(row, "TARGET").split(", ")
# Add targets from DrugBank (if drug is matched) and take a sum
if gdsc_id in drugbank_targets_mapping:
targets = list(set(targets + drugbank_targets_mapping[gdsc_id]))
target_pathway = getattr(row, "TARGET_PATHWAY")
# Create DrugWithDrugBank instance and put it into output dictionary
drugs[gdsc_id] = DrugWithDrugBank(gdsc_id, name, targets, target_pathway)
return drugs
@staticmethod
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response, drugbank_targets):
"""Load all needed files by calling one function. All argumenst are filepaths to corrresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
drug_response_df = pd.read_excel(drug_response)
# DrugBank targets
map_drugs_to_drugbank_targets = pickle.load(open(drugbank_targets, "rb"))
return (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df, coding_variants_df,
drug_response_df, map_drugs_to_drugbank_targets)
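# Example (a sketch; file paths are placeholders):
#   data = DrugWithDrugBank.load_data("drug_annotations.xlsx", "cell_line_list.csv", "gene_expression.tsv",
#                                     "cnv1.csv", "cnv2.tsv", "coding_variants.csv",
#                                     "drug_response.xlsx", "drugbank_targets.p")
#   drugbank_mapping = data[-1]
#   drugs = DrugWithDrugBank.create_drugs(data[0], drugbank_mapping)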
#################################################################################################################
# DrugGenomeWide class
#################################################################################################################
class DrugGenomeWide(Drug):
"""Class designed to represent a drug with genome-wide input data.
Main function of the class is to create and store input data corresponding to a given
drug. Four types of data are considered: gene expression, copy number variants, coding variants and tumor
tissue type. Class instances are initialized with four basic drug properties: ID, name, gene targets and
target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
In general, all the utilities are the same as in the parent Drug class, but with different input data.
When using this setting, we only use gene expression data as input, since it is recognized
as representative of the genome-wide cell line characterization. Therefore, other data extraction methods,
though available, should not be used when utilizing this class, for clarity. Two parent class
methods are overloaded: "extract_gene_expression" and "create_drugs".
Important note: here, the "create_full_data" method is not overloaded, but it is supposed to be called
only with the drug_response_df and gene_expression_df DataFrames and the data_combination argument
set to ["expression"].
--Example:
df = test_drug.create_full_data(drug_response_df, gene_expression_df, data_combination=["expression"])
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
ensembl targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering all
available (genome-wide) genes. Created after calling the "extract_gene_expression"
method.
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data. See the note above for correct
usage with DrugGenomeWide class.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of DrugGenomeWide class objects, each referenced by its ID
(keys are drug GDSC IDs)
load_data: Load all needed data files as DataFrames with one function call.
"""
def extract_gene_expression(self, gene_expression_df):
"""Generate DataFrame of gene expression data for cell lines screened for this drug,
genome-wide (all available genes).
Arguments:
gene_expression_df (DataFrame): original GDSC gene expression DataFrame
Return:
None
"""
cell_lines_str = [] # Gene expression DF column names are strings
for x in self.screened_cell_lines:
cell_lines_str.append(str(x))
cl_to_extract = []
for x in cell_lines_str:
if x in list(gene_expression_df.columns):
cl_to_extract.append(x) # Extract only cell lines contained in gene expression data
gene_expr = gene_expression_df[["ensembl_gene"] + cl_to_extract]
gene_expr_t = gene_expr.transpose()
columns = list(gene_expr_t.loc["ensembl_gene"])
gene_expr_t.columns = columns
gene_expr_t = gene_expr_t.drop(["ensembl_gene"])
rows = list(gene_expr_t.index)
gene_expr_t.insert(0, "cell_line_id", rows) # Insert columns with cell line IDs
gene_expr_t.reset_index(drop=True, inplace=True)
gene_expr_t["cell_line_id"] = pd.to_numeric(gene_expr_t["cell_line_id"])
# DataFrame should have same number of columns for each drug
assert gene_expr_t.shape[1] == 17738
self.gene_expression_data = gene_expr_t
def extract_mutation_data(self, mutation_df):
"""Generate a DataFrame with binary mutation calls for screened cell lines and target genes.
Arguments:
mutation_df: DataFrame with original mutation calls from GDSC.
Returns:
None
"""
targets = [x + "_mut" for x in self.targets]
df = mutation_df.copy()[
mutation_df.cosmic_sample_id.isin(self.screened_cell_lines)]
df = df[["cosmic_sample_id", "genetic_feature", "is_mutated"]]
cosmic_ids = []
genetic_features = {}
for feature in df.genetic_feature.unique():
genetic_features[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
df_cl = df[df.cosmic_sample_id == cl_id]
for feature in genetic_features:
mutation_status = df_cl[
df_cl.genetic_feature == feature]["is_mutated"].iloc[0]
genetic_features[feature].append(mutation_status)
df1 = pd.DataFrame()
df1.insert(0, "cell_line_id", cosmic_ids) # Insert column with samples IDs
for feature in genetic_features:
df1[feature] = genetic_features[feature]
self.mutation_data = df1 # Put DataFrame into corresponding field
def extract_cnv_data_faster(self, cnv_binary_df, map_cl_id_and_feature_to_status):
"""Generate data containing binary CNV calls for cell lines screened for the drug.
Faster implementation than original "extract_cnv_data" by using mapping between genes and
genomic segments.
Arguments:
cnv_binary_df: DataFrame from GDSC download tool with CNV data.
map_cl_id_and_feature_to_status (dict): Mapping from (COSMIC ID, genetic feature) pairs to binary CNV status.
Returns:
None
"""
df = cnv_binary_df[cnv_binary_df.cosmic_sample_id.isin(self.screened_cell_lines)]
features_to_extract = [] # Map drug's targets to CNV features (segments)
cosmic_ids = []
feature_dict = {} # Separate lists for every column in final DataFrame
for feature in df.genetic_feature.unique():
feature_dict[feature] = []
for cl_id in df.cosmic_sample_id.unique():
cosmic_ids.append(cl_id)
for feature in feature_dict:
status = map_cl_id_and_feature_to_status[(cl_id, feature)]
feature_dict[feature].append(status)
new_df = pd.DataFrame()
for feature in feature_dict:
new_df[feature] = feature_dict[feature]
new_df.insert(0, "cell_line_id", cosmic_ids)
self.cnv_data = new_df
@staticmethod
def create_drugs(drug_annotations_df):
"""Create a dictionary of DrugGenomeWide class objects, each referenced by its ID.
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website
Returns:
Dictionary of DrugGenomeWide objects as values and their ID's as keys
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
gdsc_id = getattr(row, "DRUG_ID")
name = getattr(row, "DRUG_NAME")
targets = getattr(row, "TARGET").split(", ")
target_pathway = getattr(row, "TARGET_PATHWAY")
drugs[gdsc_id] = DrugGenomeWide(gdsc_id, name, targets, target_pathway)
return drugs
@staticmethod
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response):
"""Load all needed files by calling one function. All argumenst are filepaths to corrresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
drug_response_df = pd.read_excel(drug_response)
return (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df, coding_variants_df,
drug_response_df)
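# Example (a sketch): in the genome-wide setting only gene expression should be used as input, e.g.:
#   drugs = DrugGenomeWide.create_drugs(drug_annotations_df)
#   df = drugs[some_id].create_full_data(drug_response_df, gene_expression_df,
#                                        data_combination=["expression"])
# where "some_id" is a GDSC drug ID present in the annotations.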
#################################################################################################################
# DrugDirectReactome class
#################################################################################################################
class DrugDirectReactome(DrugWithDrugBank):
"""Class representing compound from GDSC database.
Main function of the class is to create and store input data corresponding to a given
drug. Four types of data are considered: gene expression, copy number variants, coding variants and tumor
tissue type. Class instances are initialized with four basic drug properties: ID, name, gene targets and
target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
In this setting, the drug's gene targets are derived not only from GDSC and DrugBank, but also using the direct
compound-pathway mapping from the Reactome database. All genes belonging to the corresponding Reactome target pathway
are considered when computing input data. The utilities are the same as in the parent DrugWithDrugBank class, with
the exception of the "create_drugs" method, which accounts for mappings coming from Reactome, and the "load_data"
method.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
ensembl targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of DrugDirectReactome class objects, each referenced by its ID
(keys are drug GDSC IDs). Also includes target data coming from DrugBank and Reactome.
load_data: Load all needed data files as DataFrames with one function call.
"""
@staticmethod
def create_drugs(drug_annotations_df, drugbank_targets_mapping, reactome_direct_mapping):
"""Create a dictionary of DrugDirectReactome class objects, each referenced by its ID.
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website
drugbank_targets_mapping (dictionary): dictionary with mapping from drug ID to it's
targets from drugbank database
reactome_direct_mapping (dictionary): dictionary with mapping from drug ID to its
target genes derived from the direct compound-pathway matching in Reactome
Returns:
Dictionary of DrugDirectReactome objects as values and their IDs as keys
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
name = getattr(row, "DRUG_NAME")
gdsc_id = getattr(row, "DRUG_ID")
# Create an object only if it exists in Reactome mapping dictionary
if gdsc_id in reactome_direct_mapping:
targets = getattr(row, "TARGET").split(", ")
# If this ID exists in DrugBank mapping, take the sum of all three sets
if gdsc_id in drugbank_targets_mapping:
targets = list(set(targets + drugbank_targets_mapping[gdsc_id] + reactome_direct_mapping[gdsc_id]))
# Otherwise add just the Reactome targets
else:
targets = list(set(targets + reactome_direct_mapping[gdsc_id]))
target_pathway = getattr(row, "TARGET_PATHWAY")
drugs[gdsc_id] = DrugDirectReactome(gdsc_id, name, targets, target_pathway)
else:
continue
return drugs
@staticmethod
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response, drugbank_targets, reactome_targets):
"""Load all needed files by calling one function. All argumenst are filepaths to
corrresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
drug_response_df = pd.read_excel(drug_response)
# DrugBank targets
map_drugs_to_drugbank_targets = pickle.load(open(drugbank_targets, "rb"))
# Reactome targets
map_drugs_to_reactome_targets = pickle.load(open(reactome_targets, "rb"))
return (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df, coding_variants_df,
drug_response_df, map_drugs_to_drugbank_targets, map_drugs_to_reactome_targets)
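# Example (a sketch; file paths are placeholders):
#   data = DrugDirectReactome.load_data("drug_annotations.xlsx", "cell_line_list.csv", "gene_expression.tsv",
#                                       "cnv1.csv", "cnv2.tsv", "coding_variants.csv", "drug_response.xlsx",
#                                       "drugbank_targets.p", "reactome_targets.p")
#   drugs = DrugDirectReactome.create_drugs(data[0], data[-2], data[-1])
# Note that only drugs present in the Reactome mapping get an entry in the returned dictionary.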
#################################################################################################################
# DrugWithGenesInSamePathways class
#################################################################################################################
class DrugWithGenesInSamePathways(DrugWithDrugBank):
"""Class representing drug from GDSC database.
Main function of the class is to create and store input data corresponding to a given
drug. Four types of data are considered: gene expression, copy number variants, coding variants and tumor
tissue type. Class instances are initialized with four basic drug properties: ID, name, gene targets and
target pathway. Data attributes are stored as pandas DataFrames and are filled using data files
from GDSC via corresponding methods.
In general, all the utilities are the same as in the basic Drug class, but with different input data.
As in DrugWithDrugBank, we incorporate genetic features related to target genes coming both from GDSC and
DrugBank, but in addition we also consider every gene that belongs to the same pathways as the original
target genes. In other words, we consider genes that belong to pathways which are picked based on the
membership of any of the target genes.
Three of the original methods are overloaded: __init__, "create_drugs" and "load_data". Furthermore, two new
attributes are introduced: "original_targets", which stores the actual targets coming from GDSC and DrugBank, and
"related_genes", which stores genes that occur in the same pathways as the original targets.
Attributes:
gdsc_id (int): ID from GDSC website.
name (string): Drug name.
targets (list of strings): Drug's target gene names (HGNC).
target_pathway (string): Drug's target pathway as provided in GDSC annotations.
ensembl targets (list of strings): Drug's target genes ensembl IDs. Can have different length
than "targets" because some gene names may not be matched during mapping. Ensembl IDs are
needed for gene expression data.
map_from_hgnc_to_ensembl (dictionary): Dictionary mapping from gene names to ensembl IDs. Created
after calling the "load_mappings" method.
map_from_ensembl_to_hgnc (dictionary): Dictionary mapping from ensembl IDs to gene names. Created
after calling the "load_mappings" method.
total_no_samples_screened (int): Number of cell lines screened for that drug. Created after
calling the "extract_drug_response_data" method.
response_data (DataFrame): DataFrame with screened cell lines for that drug and corresponding AUC or
IC50 values. Created after calling the "extract_drug_response_data" method.
screened_cell_lines (list of ints): list containing COSMIC IDs representing cell lines screened for
that drug. Created after calling the "extract_screened_cell_lines" method.
gene_expression_data (DataFrame): DataFrame with gene expression data, considering only
target genes. Created after calling the "extract_gene_expression" method
mutation_data (DataFrame): DataFrame with binary calls for coding variants, considering only
target genes. Created after calling the "extract_mutation_data" method.
cnv_data (DataFrame): DataFrame with binary calls for copy number variants, considering only
target genes. Created after calling the "extract_cnv_data" method.
tissue_data (DataFrame): DataFrame with dummy encoded tumor tissue types in screened cell lines.
Dummy encoding results in 13 binary features. Created after calling the
"extract_tissue_data" method.
full_data (DataFrame): DataFrame with combined data coming from given set of genetic data
classes.
Methods:
Instance methods:
__init__: Initialize a Drug instance.
__repr__: Return string representation of an instance, as a command which can be used to create
this instance.
__str__: Return string representation of an instance.
extract_drug_response_data: Generate a DataFrame with drug-response data.
extract_screened_cell_lines: Generate a list of COSMIC IDs representing cell lines screened for that
drug.
extract_gene_expression: Generate a DataFrame with gene expression data for drug's screened cell lines
extract_mutation_data: Generate a DataFrame with binary calls for coding variants.
extract_cnv_data: Generate a DataFrame with binary calls for copy number variants.
extract_tissue_data: Generate a DataFrame with dummy encoded tissue types.
concatenate_data: Generate a DataFrame containing all desired genetic data classes. Available data
classes are: gene expression, coding variants, cnv variants and tissue type.
create_full_data: Combines above data extraction methods in order to create desired input data
for the drug with one method call. Returns the full data.
Class methods:
load_mappings: Load appropriate dictionaries mapping between ensembl and HGNC.
Static methods:
create_drugs: Create a dictionary of DrugWithGenesInSamePathways class objects, each referenced by its ID
(keys are drug GDSC IDs). Also includes target data coming from DrugBank and pathway co-occurring genes.
load_data: Load all needed data files as DataFrames with one function call.
"""
def __init__(self, gdsc_id, name, original_targets, related_genes, targets, target_pathway):
self.gdsc_id = gdsc_id
self.name = name
self.original_targets = original_targets
self.related_genes = related_genes
self.targets = targets
self.target_pathway = target_pathway
self.ensembl_targets = []
for x in self.targets:
try:
self.ensembl_targets.append(self.map_from_hgnc_to_ensembl[x])
except KeyError:
pass
def create_drugs(drug_annotations_df, drugbank_targets_mapping,
map_target_genes_to_same_pathways_genes):
"""Create a dictionary of DrugWithGenesInSamePathways class objects, each referenced by it's ID. Add
target data coming from DrugBank, as well as genes that occur in the same pathways as original target
genes.
Arguments:
drug_annotations_df (DataFrame): DataFrame of drug annotations from GDSC website.
drugbank_targets_mapping (dictionary): Dictionary with mapping from drug ID to it's
targets from drugbank database.
map_target_genes_to_same_pathways_genes (dictionary): Dictionary with mapping from target
genes names to gene names that occur in same pathways.
Returns:
Dictionary of DrugWithGenesInSamePathways objects as values and their ID's as keys.
"""
drugs = {}
for row in drug_annotations_df.itertuples(index=True, name="Pandas"):
# Extract name, ID and GDSC target pathway
name = getattr(row, "DRUG_NAME")
gdsc_id = getattr(row, "DRUG_ID")
target_pathway = getattr(row, "TARGET_PATHWAY")
# Extract original targets (for now just from GDSC)
original_targets = getattr(row, "TARGET").split(", ")
# Add genes from DrugBank (if drug is matched) to original targets list
if gdsc_id in drugbank_targets_mapping:
original_targets = list(set(original_targets + drugbank_targets_mapping[gdsc_id]))
# Extract list of related genes (that occur in same pathways but are not direct targets)
# from Reactome dictionary
related_genes = []
# Iterate over direct target genes
for target_gene in original_targets:
if target_gene in map_target_genes_to_same_pathways_genes:
# Genes related to one specific target
coocurring_genes = map_target_genes_to_same_pathways_genes[target_gene]
# Update the overall list of related genes for given drug
related_genes = related_genes + coocurring_genes
# Exclude genes that are original targets from related genes
related_genes_final = []
for gene in related_genes:
if gene not in original_targets:
related_genes_final.append(gene)
# Remove duplicates from related_genes_final
related_genes_final = list(set(related_genes_final))
# Setup "targets" field as a sum of original targets and related genes
targets = original_targets + related_genes_final
assert len(targets) == len(original_targets) + len(related_genes_final)
# Finally, create actual DrugWithGenesInSamePathways instance
drugs[gdsc_id] = DrugWithGenesInSamePathways(gdsc_id, name, original_targets,
related_genes_final, targets, target_pathway)
return drugs
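# Illustrative usage sketch (not part of the pipeline): the names below, such as
# drug_annotations_df, drugbank_map and pathway_map, are hypothetical placeholders for the
# objects returned by load_data. Under that assumption, the drug dictionary could be built like this:
#
#     drugs = DrugWithGenesInSamePathways.create_drugs(
#         drug_annotations_df, drugbank_map, pathway_map)
#     some_drug = next(iter(drugs.values()))
#     print(some_drug.name, len(some_drug.original_targets), len(some_drug.related_genes))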
def load_data(drug_annotations, cell_line_list, gene_expr, cnv1, cnv2,
coding_variants, drug_response, drugbank_targets, pathway_occurence_genes):
"""Load all needed files by calling one function. All argumenst are filepaths to corrresponding files."""
# Drug annotations
drug_annotations_df = pd.read_excel(drug_annotations)
# Cell line annotations
col_names = ["Name", "COSMIC_ID", "TCGA classification", "Tissue", "Tissue_subtype", "Count"]
cell_lines_list_df = pd.read_csv(cell_line_list, usecols=[1, 2, 3, 4, 5, 6], header=0, names=col_names)
# Gene expression
gene_expression_df = pd.read_table(gene_expr)
# CNV
d1 = pd.read_csv(cnv1)
d2 = pd.read_table(cnv2)
d2.columns = ["genes_in_segment"]
def f(s):
return s.strip(",")
cnv_binary_df = d1.copy()
cnv_binary_df["genes_in_segment"] = d2["genes_in_segment"].apply(f)
# Coding variants
coding_variants_df = pd.read_csv(coding_variants)
# Drug-response
drug_response_df = pd.read_excel(drug_response)
# DrugBank targets
map_drugs_to_drugbank_targets = pickle.load(open(drugbank_targets, "rb"))
# Dictionary mapping from target genes to genes that occur in same pathways
map_target_genes_to_same_pathways_genes = pickle.load(
open(pathway_occurence_genes, "rb"))
return (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df, coding_variants_df,
drug_response_df, map_drugs_to_drugbank_targets, map_target_genes_to_same_pathways_genes)
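# Illustrative sketch of chaining load_data and create_drugs; all file paths below are hypothetical
# placeholders and would need to point at the actual GDSC/DrugBank/Reactome files:
#
#     (drug_annotations_df, cell_lines_list_df, gene_expression_df, cnv_binary_df,
#      coding_variants_df, drug_response_df, drugbank_map, pathway_map) = \
#         DrugWithGenesInSamePathways.load_data(
#             "drug_annotations.xlsx", "cell_lines.csv", "gene_expression.tsv",
#             "cnv1.csv", "cnv2.tsv", "coding_variants.csv", "drug_response.xlsx",
#             "drugbank_targets.pkl", "pathway_occurence_genes.pkl")
#     drugs = DrugWithGenesInSamePathways.create_drugs(
#         drug_annotations_df, drugbank_map, pathway_map)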
#################################################################################################################
# Experiment class
#################################################################################################################
class Experiment(object):
"""Class representing single machine learning experiment with GDSC data.
The class is mainly useful for storing an experiment's results. The actual machine learning
is performed outside of the class. Data stored in dictionaries is filled during machine learning.
Other methods describing the results use those primary data dictionaries.
Attributes:
name (string): Name somehow summarizing the experiment.
algorithm (string): Name of the predictive algorithm used.
param_search_type (string): What kind of parameter search was used: exhaustive grid search
or randomized search.
data_normalization_type (string): What kind of data normalization was applied, on which features.
kfolds (int): How many folds were used during cross-validation hyperparameter tuning
split_seed (int): Seed (random_state) used when randomly splitting the data (using sklearn's
train_test_split). If None, no specific seed was set.
tuning_seed (int): Seed (random_state) used when doing hyperparameter tuning with sklearn's
RandomizedSearchCV.
input_data (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and DataFrames
containing corresponding input data as values.
best_scores (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and tuples of
achieved best prediction scores as values.
cv_results (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and full cross-validation
results as values.
best_parameters (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and best found
hyperparameters as values.
dummy_scores (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and corresponding
dummy scores as values.
data_stds (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and standard deviations
of the response variable as values.
coefficients (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and weights of the best
found model as values.
training_scores (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and scores
obtained on the training set as values.
data_shapes (dictionary): Dictionary with (drug.name, drug.gdsc_id) pairs as keys and tuples of input
data shapes as values.
results_df (DataFrame): DataFrame with results of the Experiment for each considered drug.
Methods:
Instance methods:
__init__: Initializer.
__repr__: Return short string representation of an instance.
__str__: Return more detailed description of an experiment.
create_results_df: Wrap results from the storage dictionaries into a single pandas DataFrame.
results_summary_single: Display numeric summary of Experiment's results.
boxplot_of_performance_single: Create boxplot of a given performance metric across all drugs.
barplot_of_rmses_single: Generate barplots of model test RMSEs along with dummy RMSE for each drug,
for single Experiment.
boxplots_rel_rmse_wrt_pathways: Generate boxplots of relative RMSE wrt. target pathways.
plot_one_vs_another: Create regular plot of first variable vs. another.
plot_pathway_distribution: Plot overall pathway counts in this Experiment.
list_of_better_drugs: Compute list of drugs which performed better in the "other" Experiment.
plot_pathway_distribution_in_better_drugs: Plot target pathway counts in better performing drugs
in "other" Experiment.
merge_two_exp_results: Take another Experiment object and concatenate the two corresponding result
DataFrames.
results_for_better_drugs: Compute DataFrame containing comparisons between two Experiments along
drugs that performed better in the other one.
barplots_of_rmse_for_two_experiments: Plot barplots of test RMSE or relative test RMSE for each drug.
boxplots_of_performance_for_two_experiments: Create boxplots of a given performance metric for two
Experiments.
Static methods:
create_input_for_each_drug: Take in the dictionary with drug_id: Drug object pairs and create
input data for each Drug.
"""
# Instance methods
def __init__(self, name, algorithm, parameter_search_type, data_normalization_type,
kfolds, split_seed=None, tuning_seed=None):
"""Experiment class initializer.
Attributes other than name will be filled during actual learning.
Arguments:
name (string): Name somehow summarizing the experiment.
algorithm (string): Name of the predictive algorithm used.
param_search_type (string): What kind of parameter search was used: exhaustive grid search
or randomized search.
data_normalization_type (string): What kind of data normalization was applied, on which features.
kfolds (int): How many folds were used during cross-validation hyperparameter tuning
split_seed (int): Seed (random_state) used when randomly splitting the data (using sklearn's
train_test_split). If None, no specific seed was set.
tuning_seed (int): Seed (random_state) used when doing hyperparameter tuning with sklearn's
RandomizedSearchCV.
Returns:
None
"""
# General characteristics
self.name = name
self.algorithm = algorithm
self.param_search_type = parameter_search_type
self.data_normalization_type = data_normalization_type
self.kfolds = kfolds
self.split_seed = split_seed
self.tuning_seed = tuning_seed
# Results storage dictonaries
self.input_data = {}
self.best_scores = {}
self.cv_results = {}
self.best_parameters = {}
self.dummy_scores = {}
self.data_stds = {}
self.coefficients = {}
self.training_scores = {}
self.data_shapes = {}
def __repr__(self):
"""Return short string representation of an object."""
return 'Experiment("{}")'.format(self.name)
def __str__(self):
"""Return more detailed description of an experiment."""
return self.name
def create_results_df(self, drug_annotations_df):
"""Wrap results in storage dictionaries into single DataFrame.
Assign resulting DataFrame into instance attribute (results_df).
Arguments:
drug_annotations_df (DataFrame): list of drugs in GDSC
Returns:
None
"""
# Initialize DataFrame to fill
df = pd.DataFrame()
# Initialize lists (columns) we want to have
drug_names = []
drug_ids = []
model_test_RMSES = []
model_CV_RMSES = []
model_test_correlations = []
model_test_corr_pvals = []
dummy_test_RMSES = []
dummy_CV_RMSES = []
y_stds = []
model_train_RMSES = []
numbers_of_samples = []
numbers_of_features = []
target_pathways = []
# Fill the columns lists
for name, ide in self.best_scores:
drug_names.append(name)
drug_ids.append(ide)
model_test_RMSES.append(self.best_scores[(name, ide)].test_RMSE)
model_CV_RMSES.append((-self.best_scores[(name, ide)].cv_best_score) ** 0.5)
model_test_correlations.append(self.best_scores[(name, ide)].test_correlation[0])
model_test_corr_pvals.append(self.best_scores[(name, ide)].test_correlation[1])
dummy_test_RMSES.append(self.dummy_scores[(name, ide)].test_RMSE)
dummy_CV_RMSES.append(self.dummy_scores[(name, ide)].cv_RMSE)
y_stds.append(self.data_stds[(name, ide)].overall)
model_train_RMSES.append(self.training_scores[(name, ide)].training_RMSE)
numbers_of_samples.append(self.data_shapes[(name, ide)][0])
numbers_of_features.append(self.data_shapes[(name, ide)][1] - 14)
target_pathways.append(
drug_annotations_df[drug_annotations_df["DRUG_ID"] == ide]["TARGET_PATHWAY"].iloc[0])
# Insert lists as a DataFrame columns
df.insert(0, "Drug Name", drug_names)
df.insert(1, "Drug ID", drug_ids)
df.insert(2, "Target Pathway", target_pathways)
df.insert(3, "Model test RMSE", model_test_RMSES)
df.insert(4, "Model CV RMSE", model_CV_RMSES)
df.insert(5, "Model test correlation", model_test_correlations)
df.insert(6, "Model test pval", model_test_corr_pvals)
df.insert(7, "Dummy test RMSE", dummy_test_RMSES)
df.insert(8, "Dummy CV RMSE", dummy_CV_RMSES)
df.insert(9, "STD in Y", y_stds)
df.insert(10, "Model train RMSE", model_train_RMSES)
df.insert(11, "Number of samples", numbers_of_samples)
df.insert(12, "Number of features", numbers_of_features)
df.insert(13, "Relative test RMSE", df["Dummy test RMSE"] / df["Model test RMSE"])
self.results_df = df
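# Illustrative sketch (hypothetical Experiment "exp" whose storage dictionaries were filled
# during learning): build the results table and inspect the best-performing drugs.
#
#     exp.create_results_df(drug_annotations_df)
#     print(exp.results_df.sort_values("Relative test RMSE", ascending=False).head(10))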
def results_summary_single(self):
"""Display numeric summary of Experiment's results.
Arguments:
None
Returns:
None
"""
print(self.name)
print("Mean and median test RMSE:", round(self.results_df["Model test RMSE"].mean(), 4),
round(self.results_df["Model test RMSE"].median(), 4))
print('Mean and median test correlation:', round(self.results_df["Model test correlation"].mean(), 4),
round(self.results_df["Model test correlation"].median(), 4))
# Plots of result from single Experiment
def boxplot_of_performance_single(self, metric, title="Predictive performance", figsize = (6, 6),
title_size=25, label_size=20,
tick_size=20, save_directory=None):
"""Creates boxplot of a given perfromance metric across all drugs.
Arguments:
metric (string): which variable to plot, must be one of the column names in self.results_df
title (string): title of the plot
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
fig = plt.figure(figsize=figsize)
plt.tick_params(labelsize=tick_size)
plt.title(title, fontsize=title_size)
plt.grid()
sns.boxplot(x=self.results_df[metric], orient="v")
plt.xlabel("", fontsize=label_size)
plt.ylabel(metric, fontsize=label_size)
if save_directory:
plt.savefig(save_directory, bbox_inches='tight')
plt.show()
def barplot_of_rmses_single(self, figsize=(35, 12), title_size=45, label_size=30, tick_size=20,
half=1, save_directory=None):
"""Generate barplots of model test RMSEs along with dummy RMSE for each drug, for single
Experiment.
Arguments:
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
half (int): which half of the drugs to plot. If 0, all available drugs are plotted
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
# Set up legend parameters
params = {"legend.fontsize": 25,
"legend.handlelength": 2}
plt.rcParams.update(params)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("Cross-validation RMSE for each drug", fontsize=title_size)
ax.set_xlabel("Drug", fontsize=label_size)
ax.set_ylabel("RMSE", fontsize=label_size)
# Set up DataFrame slicing
if half == 1:
start_idx = 0
end_idx = self.results_df.shape[0] // 2
elif half == 2:
start_idx = self.results_df.shape[0] // 2
end_idx = self.results_df.shape[0]
else:
start_idx = 0
end_idx = self.results_df.shape[0]
self.results_df.sort_values("Model test RMSE", ascending = False).iloc[start_idx:end_idx].plot(
x = "Drug Name", y = ["Model test RMSE", "Dummy test RMSE"] ,kind="bar", ax = ax, width = 0.7,
figsize=figsize, fontsize=tick_size, legend = True, grid = True)
if save_directory:
plt.savefig(save_directory)
plt.show()
def boxplots_rel_rmse_wrt_pathways(self, figsize=(30, 12), title_size=35, label_size=30,
tick_size=25, save_directory=None):
"""Generate boxplots of relative RMSE wrt. target pathways.
Arguments:
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
# Set up order list for plotting
order_list = list(self.results_df.groupby("Target Pathway").agg({"Relative test RMSE": np.median}).sort_values(
"Relative test RMSE", ascending=False).index)
fig = plt.figure(figsize=figsize)
plt.tick_params(labelsize=tick_size)
plt.xticks(rotation="vertical")
plt.title("Relative test RMSE with respect to drug's target pathway", fontsize=title_size)
plt.grid()
plt.gcf().subplots_adjust(bottom=0.53)
sns.boxplot(x = "Target Pathway", y = "Relative test RMSE", data = self.results_df, order=order_list)
plt.ylabel("Relative test RMSE", fontsize=label_size)
plt.xlabel("Target pathway", fontsize=label_size)
if save_directory:
plt.savefig(save_directory)
plt.show()
def plot_one_vs_another(self, first, second, title, hline=False, figsize=(15, 10), title_size=30, label_size=25,
tick_size=15, save_directory=None):
"""Create regular plot of first variable vs. another.
Arguments:
first (string): first variable to plot, must be one of the column names in self.results_df
second(string): second variable to plot, must be one of the column names in self.results_df
hline (bool): do we want a horizontal line at one
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
plt.figure(figsize=figsize)
plt.title(title, fontsize=title_size)
plt.xlabel(second, fontsize=label_size)
plt.ylabel(first, fontsize=label_size)
plt.tick_params(labelsize = tick_size)
if hline:
plt.axhline(y=1.0, xmin=0, xmax=1.2, color="black", linewidth=2.5)
sns.regplot(x=second, y=first, data=self.results_df)
if save_directory:
plt.savefig(save_directory)
plt.show()
def plot_pathway_distribution(self, figsize=(15, 8), title_size=30, label_size=25,
tick_size=15, save_directory=None):
"""Plot overall pathway counts in this Experiment.
Arguments:
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
plt.figure(figsize=figsize)
plt.title("Target Pathway counts", fontsize=title_size)
plt.xlabel("Target Pathway", fontsize=label_size)
sns.countplot(x="Target Pathway", data=self.results_df)
plt.ylabel("Count", fontsize=label_size)
plt.xticks(rotation='vertical')
plt.tick_params("both", labelsize=tick_size)
plt.gcf().subplots_adjust(bottom=0.45)
if save_directory:
plt.savefig(save_directory)
plt.show()
# Comparisons between two Experiments
def list_of_better_drugs(self, other, rmse_type="relative", rmse_fraction=0.8,
correlation_fraction=None, correlation_threshold=0.25):
"""Compute list of drugs which performed better in the "other" experiment.
Arguments:
other (Experiment): other considered Experiment object
rmse_type (string): "relative" or "absolute" - whether to use relative or absolute RMSE in comparison
rmse_fraction (float): fraction by which other RMSE should be better for drug to
be considered as "better". If rmse_type is "relative", fraction should be greater than 1.,
less than 1. otherwise.
correlation_fraction (float, default None): fraction by which other correlation should be better for
drug to be considered as "better". Should be greater than 1. If None, this condition is not applied.
correlation_threshold (float): correlation that needs to be achieved in order
for drug to be "better"
Returns:
list of IDs of drugs that performed better in the other Experiment
"""
if rmse_type == "relative":
column = "Relative test RMSE"
if rmse_type == "absolute":
column = "Model test RMSE"
better_drugs = []
for drug_id in self.results_df["Drug ID"].unique():
self_df = self.results_df[self.results_df["Drug ID"] == drug_id]
other_df = other.results_df[other.results_df["Drug ID"] == drug_id]
if other_df.shape[0] < 1: # Make sure the other Experiment contains results for this drug
continue
# Extract data of interest
self_rmse = self_df[column].iloc[0]
self_corr = self_df["Model test correlation"].iloc[0]
other_rmse = other_df[column].iloc[0]
other_corr = other_df["Model test correlation"].iloc[0]
other_relative = other_df["Relative test RMSE"].iloc[0]
# Classify as good or bad
if rmse_type == "relative": # Need to distungish two cases because the higher relative RMSE the better
if correlation_fraction:
if (other_rmse > rmse_fraction * self_rmse) and (other_relative > 1.) \
and (other_corr > correlation_threshold) and (other_corr > correlation_fraction * self_corr):
better_drugs.append(drug_id)
else:
if (other_rmse > rmse_fraction * self_rmse) and (other_relative > 1.) \
and (other_corr > correlation_threshold):
better_drugs.append(drug_id)
if rmse_type == "absolute": # The lower absolute RMSE the better
if correlation_fraction:
if (other_rmse < rmse_fraction * self_rmse) and (other_relative > 1.) \
and (other_corr > correlation_threshold) and (other_corr > correlation_fraction * self_corr):
better_drugs.append(drug_id)
else:
if (other_rmse < rmse_fraction * self_rmse) and (other_relative > 1.) \
and (other_corr > correlation_threshold):
better_drugs.append(drug_id)
return better_drugs
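# Illustrative sketch comparing two hypothetical Experiments "exp_baseline" and "exp_pathways";
# with rmse_type="relative", rmse_fraction should be greater than 1:
#
#     better_ids = exp_baseline.list_of_better_drugs(
#         exp_pathways, rmse_type="relative", rmse_fraction=1.1, correlation_threshold=0.25)
#     print(len(better_ids), "drugs improved in the other Experiment")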
def plot_pathway_distribution_in_drugs_intersection(self, other, figsize=(15, 8), title_size=30, label_size=25,
tick_size=15, save_directory=None):
"""Plot pathway distribution in this (self) Experiment across drugs, but only consider drugs common
with "other" Experiment.
Arguments:
other (Experiment): other considered Experiment object
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
# Compute intersection in IDs
intersection = list(self.merge_two_exp_results(
other, ["a", "b"], "flag")["Drug ID"].unique())
# Actual plot
plt.figure(figsize=figsize)
plt.title("Target Pathway counts", fontsize=title_size)
plt.xlabel("Target Pathway", fontsize=label_size)
sns.countplot(x="Target Pathway", data=self.results_df[self.results_df["Drug ID"].isin(intersection)])
plt.ylabel("Count", fontsize=label_size)
plt.xticks(rotation='vertical')
plt.tick_params("both", labelsize=tick_size)
plt.gcf().subplots_adjust(bottom=0.45)
if save_directory:
plt.savefig(save_directory)
plt.show()
# Plots of results from two Experiments
def plot_pathway_distribution_in_better_drugs(self, other, rmse_type="relative",
rmse_fraction=0.8, correlation_fraction=None,
correlation_threshold=0.25,
figsize=(15, 8), title_size=30,
label_size=25, tick_size=15, save_directory=None):
"""Plot target pathway counts in better performing drugs.
Arguments:
other (Experiment): other considered Experiment object
rmse_type (string): "relative" or "absolute" - whether to use relative or
absolute RMSE in comparison
rmse_fraction (float): fraction by which other RMSE should be better for drug to
be considered as "better". If rmse_type is "relative", fraction should be greater than 1.,
less than 1. otherwise.
correlation_fraction (float, default None): fraction by which other correlation should be better for
drug to be considered as "better". Should be greater than 1. If None, this condition is not applied.
correlation_threshold (float): correlation that needs to be achieved in order
for drug to be "better"
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
# Compute list of better drugs
better_drugs = self.list_of_better_drugs(other, rmse_type, rmse_fraction, correlation_fraction,
correlation_threshold)
plt.figure(figsize=figsize)
plt.title("Target Pathway counts", fontsize=title_size)
plt.xlabel("Target Pathway", fontsize=label_size)
plt.ylabel("Count", fontsize=label_size)
sns.countplot(x="Target Pathway", data=other.results_df[other.results_df["Drug ID"].isin(better_drugs)
])
plt.xticks(rotation='vertical')
plt.tick_params("both", labelsize=tick_size)
plt.gcf().subplots_adjust(bottom=0.45)
if save_directory:
plt.savefig(save_directory)
plt.show()
def merge_two_exp_results(self, other, flags, feature_name):
"""Take other Eperiment object and horizontally merge two corresponding result
DataFrames.
Arguments:
other (Experiment): other considered Experiment object
flags (list of strings): flags assigned to DataFrame entries from given object. First element
should be for this (self) instance
feature_name (string): name of a column that differentiate between two results sets
Returns:
total_results_df (DataFrame): Concatenated results from both Experiments.
"""
# Since the whole point is to compare results for the same drugs, first let's find
# intersection between two DataFrames in terms of Drug ID
ids_intersection = list(self.results_df[
self.results_df["Drug ID"].isin(list(other.results_df["Drug ID"].unique()))]["Drug ID"].unique())
# Create intersected DataFrames
# Use copies so that adding the flag column below does not trigger SettingWithCopyWarning
self_intersected_df = self.results_df[self.results_df["Drug ID"].isin(ids_intersection)].copy()
other_intersected_df = other.results_df[other.results_df["Drug ID"].isin(ids_intersection)].copy()
assert self_intersected_df.shape[0] == other_intersected_df.shape[0]
# Create new DataFrame with merged results
# First, assign flags to self DataFrame entries
flag_list = [flags[0]] * self_intersected_df.shape[0]
self_intersected_df[feature_name] = flag_list
# Second DataFrame
flag_list = [flags[1]] * other_intersected_df.shape[0]
other_intersected_df[feature_name] = flag_list
# Concatenate both DataFrames
total_results_df = pd.concat([self_intersected_df, other_intersected_df], axis=0)
return total_results_df
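# Illustrative sketch (hypothetical Experiments and flags); the resulting DataFrame stacks the
# per-drug results of both Experiments and marks their origin in a "Setup" column:
#
#     total_df = exp_baseline.merge_two_exp_results(
#         exp_pathways, flags=["targets only", "targets + pathways"], feature_name="Setup")
#     print(total_df.groupby("Setup")["Model test RMSE"].median())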
def results_for_better_drugs(self, other, flags, feature_name, rmse_type="relative",
rmse_fraction=0.8, correlation_fraction=None, correlation_threshold=0.2):
"""Compute DataFrame containing comparisons between two Experiments along drugs
that performed better in the other one.
Arguments:
other (Experiment): other considered Experiment object
flags (list of strings): flags assigned to DataFrame entries from given object. First element
should be for this (self) instance
feature_name (string): name of a column that differentiate between two results sets
rmse_type (string): "relative" or "absolute" - whether to use relative or absolute RMSE in comparison
rmse_fraction (float): fraction by which other RMSE should be better for drug to
be considered as "better". If rmse_type is "relative", fraction should be greater than 1.,
less than 1. otherwise.
correlation_fraction (float, default None): fraction by which other correlation should be better for
drug to be considered as "better". Should be greater than 1. If None, this condition is not applied.
correlation_threshold (float): correlation that needs to be achieved in order
for drug to be "better"
Returns:
DataFrame with the merged results restricted to drugs that performed better in the other Experiment.
"""
# Compute DataFrame containing results from both Experiments
total_df = self.merge_two_exp_results(other, flags, feature_name)
# Extract data only for better performing drugs
better_drugs = self.list_of_better_drugs(other, rmse_type, rmse_fraction, correlation_fraction,
correlation_threshold)
return total_df[total_df["Drug ID"].isin(better_drugs)].sort_values("Drug ID")
def barplots_of_performance_for_two_experiments(self, other, flags, feature_name, metric,
figsize=(35, 12), title_size=45, label_size=30,
tick_size=20, half=1, save_directory=None):
"""Plot barplots of a given performance metric for each drug. Two barplots for each drug
coming from two different Experiments.
Arguments:
other (Experiment): other considered Experiment object
flags (list of strings): flags assigned to DataFrame entries from given object. First element
should be for this (self) instance
feature_name (string): name of a column that differentiate between two results sets
metric (string): which metric to plot
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
half (int): which half of the drugs to plot. If 0, all available drugs are plotted
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
# Compute DataFrame containing results from both Experiments
total_df = self.merge_two_exp_results(other, flags, feature_name)
# Determine which half of the drugs to plot
if half == 1:
start_idx = 0
end_idx = len(total_df["Drug ID"].unique()) // 2
elif half == 2:
start_idx = len(total_df["Drug ID"].unique()) // 2
end_idx = len(total_df["Drug ID"].unique())
else:
start_idx = 0
end_idx = len(total_df["Drug ID"].unique())
drugs_to_plot = list(total_df["Drug ID"].unique())[start_idx:end_idx]
# Actual plotting
# Legend parameters
params = {"legend.fontsize": 25,
"legend.handlelength": 2}
plt.rcParams.update(params)
fig = plt.figure(figsize=figsize)
title = metric + " for each drug"
plt.title(title, fontsize=title_size)
plt.xlabel("Drug name", fontsize=label_size)
plt.ylabel(metric, fontsize=label_size)
sns.barplot("Drug Name", metric, hue=feature_name,
data=total_df[
total_df["Drug ID"].isin(drugs_to_plot)].sort_values(
metric, ascending=False))
plt.xticks(rotation='vertical')
plt.tick_params("both", labelsize=20)
plt.legend(title = "")
plt.gcf().subplots_adjust(bottom=0.25)
if save_directory:
plt.savefig(save_directory)
plt.show()
def boxplots_of_performance_for_two_experiments(self, other, flags, feature_name, metric,
title="Predictive performance",
figsize = (8, 6), title_size=25,
label_size=20, tick_size=18, save_directory=None):
"""Create boxplots of a given performance metric for two Experiments, across
all drugs.
Arguments:
other (Experiment): other considered Experiment object
flags (list of strings): flags assigned to DataFrame entries from given object. First element
should be for this (self) instance
feature_name (string): name of a column that differentiate between two results sets
metric (string): which variable to plot, must be one of the column names in self.results_df
title (string): title of the plot
figsize (tuple): size of the figure
title_size (int): font size of the title
label_size (int): font size of axis labels
tick_size (int): font size of axis ticks
save_directory (string or None): directory to which save the plot. If None,
plot is not saved
Returns:
None
"""
# Compute DataFrame containing results from both Experiments
total_df = self.merge_two_exp_results(other, flags, feature_name)
fig = plt.figure(figsize=figsize)
plt.tick_params(labelsize=tick_size)
plt.title(title, fontsize = title_size)
plt.grid()
sns.boxplot(x = feature_name, y = metric, data=total_df)
plt.xlabel("", fontsize=label_size)
plt.ylabel(metric, fontsize=label_size)
if save_directory:
plt.savefig(save_directory)
plt.show()
# Static methods
@staticmethod
def create_input_for_each_drug(drug_dict, drug_response_df, data_combination,
gene_expression_df=None, cnv_binary_df=None,
map_cl_id_and_feature_to_status=None,
cell_lines_list_df=None, coding_variants_df=None,
merck_signatures_df=None,
feat_threshold=None, log=False,
metric="AUC"):
"""Take in the dictionary with drug_id: Drug object pairs and create input data
for each Drug.
Arguments:
drug_dict (dictionary): dictionary with drug_id: Drug object pairs
drug_response_df (DataFrame): DataFrame of drug response data from GDSC
gene_expression_df (DataFrame): original GDSC gene expression DataFrame
cnv_binary_df (DataFrame): DataFrame from GDSC download tool with CNV data
cell_lines_list_df (DataFrame): cell line list from GDSC
coding_variants_df (DataFrame): DataFrame with original mutation calls from GDSC
data_combination (list of strings): determines which types of features to include, valid
choices are: "CNV", "mutation", "expression" and "tissue"
feat_threshold (int): if not None, count number of Drugs with number of features greater
than feat_threshold
log (bool): provide or not the progress display
Returns:
None
"""
s = 0
c = 0
sum_of_features = 0
for ide in drug_dict:
drug = drug_dict[ide] # Current Drug object
df = drug.create_full_data(drug_response_df, gene_expression_df=gene_expression_df,
cnv_binary_df=cnv_binary_df,
map_cl_id_and_feature_to_status=map_cl_id_and_feature_to_status,
cell_line_list=cell_lines_list_df,
mutation_df=coding_variants_df,
merck_signatures_df=merck_signatures_df,
data_combination=data_combination,
metric=metric
)
if feat_threshold:
if df.shape[1] >= feat_threshold:
s += 1
sum_of_features += (df.shape[1] - 14)
c +=1
if c % 10 == 0 and log:
print(c, "drugs done")
if feat_threshold:
print("Number of drugs with number of features bigger than {}: {}".format(
feat_threshold, s))
if log:
print("Mean number of features in {} drugs: {}".format(c, sum_of_features / c))
###############################################################################################
# Modeling class
###############################################################################################
class Modeling(object):
"""Basic class designed for performing machine learning experiments. This class alone doesn't involve
any feature selection method.
"""
def __init__(self, name, param_grid, estimator_seeds, split_seeds, n_combinations=20, kfolds=5,
rforest_jobs=None, rforest_refit_jobs=None, max_iter=1000, tuning_jobs=None,
scoring="neg_mean_squared_error", test_size=0.3):
""" Modeling object initializer.
Arguments:
name (string): Name describing the experiment.
param_grid (dict): Grid of parameters to search on during hyperparameter tuning.
estimator_seeds (list of ints): Random States for predictive algorithms used.
split_seeds (list of ints): Seeds for data splits.
n_combinations (int): Number of parameters to try during hyperparameter tuning.
kfolds (int): Number of folds of cross validation during hyperparameter tuning.
rforest_jobs (int): Number of cores to use during fitting and predicting with
Random Forest (during cross-validation hyperparameter tuning).
rforest_refit_jobs (int): Number of cores to use during refitting Random Forest
after cross-validation hyperparameter tuning.
tuning_jobs (int): Number of cores to use during cross-validation.
scoring (string): Function to optimize during parameter tuning.
test_size (float): Fraction of whole data spent on test set.
Returns:
Modeling object.
"""
if len(estimator_seeds) != len(split_seeds):
raise ValueError("Random forest seeds and split seeds must have the same length")
self.name = name
self.param_grid = param_grid
self.estimator_seeds = estimator_seeds
self.split_seeds = split_seeds
self.n_combinations = n_combinations
self.kfolds = kfolds
self.rforest_jobs = rforest_jobs
self.rforest_refit_jobs = rforest_refit_jobs
self.max_iter = max_iter
self.tuning_jobs = tuning_jobs
self.scoring = scoring
self.test_size = test_size
def enet_fit_single_drug(self, X, y, tuning_seed=None,
enet_seed=None, verbose=0, refit=True):
"""Perform single modeling given input data and labels, using Elastic Net regression.
Modeling consists of hyperparameter tuning with kfold cross-validation and fitting
a model with best parameters on whole training data.
Arguments:
X (DataFrame): Training input data.
y (Series): Training response variable.
tuning_seed (int): Random State for parameter tuning.
enet_seed (int): Random State of the Elastic Net model.
verbose (int): Controls verbosity of RandomizedSearchCV.
Returns:
grid: Fitted (if refit is True) RandomizedSearchCV object.
"""
# Set elements of the pipeline, i.e. scaler and estimator
scaler = StandardScaler()
estimator = ElasticNet(random_state=enet_seed, max_iter=self.max_iter)
# Create pipeline
main_pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Setup RandomizedSearchObject
grid = model_selection.RandomizedSearchCV(main_pipeline, param_distributions=self.param_grid,
n_iter=self.n_combinations, scoring=self.scoring,
cv=self.kfolds,
random_state=tuning_seed,
n_jobs=self.tuning_jobs,
verbose=verbose,
pre_dispatch='2*n_jobs',
refit=refit)
# Fit the grid
grid.fit(X, y)
return grid
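# A minimal sketch of a parameter grid for this pipeline; since tuning runs on a Pipeline,
# the keys must be prefixed with the step name "estimator__" (the values below are arbitrary
# example choices, not the grid used in the experiments):
#
#     enet_param_grid = {
#         "estimator__alpha": [0.01, 0.1, 1.0, 10.0],
#         "estimator__l1_ratio": [0.1, 0.5, 0.9],
#     }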
def rf_fit_single_drug(self, X, y, tuning_seed=None,
rforest_seed=None, verbose=0, refit=True):
"""Perform single modeling given input data and labels.
Modeling consists of hyperparameter tuning with kfold cross-validation and fitting
a model with best parameters on whole training data.
Arguments:
X (DataFrame): Training input data.
y (Series): Training response variable.
tuning_seed (int): Random State for parameter tuning.
rforest_seed (int): Random State of Random Forest Model.
verbose (int): Controls verbosity of RandomizedSearchCV.
Returns:
grid: Fitted RandomizedSearchCV object.
"""
# Set elements of the pipeline, i.e. scaler and estimator
scaler = StandardScaler()
estimator = RandomForestRegressor(random_state=rforest_seed, n_jobs=self.rforest_jobs)
# Create pipeline
main_pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Setup RandomizedSearchObject
grid = model_selection.RandomizedSearchCV(main_pipeline, param_distributions=self.param_grid,
n_iter=self.n_combinations, scoring=self.scoring,
cv=self.kfolds,
random_state=tuning_seed,
n_jobs=self.tuning_jobs,
verbose=verbose,
pre_dispatch='2*n_jobs',
refit=refit)
# Fit the grid
grid.fit(X, y)
return grid
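# Analogous sketch of a Random Forest grid (arbitrary example values, keys again prefixed
# with the "estimator__" step name of the pipeline):
#
#     rf_param_grid = {
#         "estimator__n_estimators": [100, 300, 500],
#         "estimator__max_depth": [None, 10, 20],
#         "estimator__max_features": ["sqrt", 0.3],
#     }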
def evaluate_single_drug(self, grid, X, y):
"""Evaluate provided fitted (or not) model on a test data.
Arguments:
grid (RandomizedSearchCV object): Previously fitted best model.
X (DataFrame): Test input data.
y (Series): Test response variable.
Returns:
model_test_scores (namedtuple): Results on a test set.
grid.cv_results_ (dict): Full results of parameter tuning with cross-validation.
grid.best_params_ (dict): Parameters of best found model.
"""
if grid.refit:
pred = grid.predict(X)
# Record results as named tuple
ModelTestScores = collections.namedtuple("ModelTestScores", ["cv_best_RMSE", "test_RMSE",
"test_explained_variance", "test_correlation"])
model_test_scores = ModelTestScores((-grid.best_score_) ** 0.5,
metrics.mean_squared_error(y, pred) ** 0.5,
metrics.explained_variance_score(y, pred),
pearsonr(y, pred))
return model_test_scores, grid.cv_results_, grid.best_params_
# If grid is not refitted, we just want the CV results
else:
return (-grid.best_score_) ** 0.5, grid.cv_results_, grid.best_params_
def fit_and_evaluate(self, X_train, y_train, X_test, y_test, tuning_seed=None,
rforest_seed=None, verbose=0, test_set=True):
"""Perform fitting and evaluation model in a single method (using Random Forest). Modeling
consists of hyperparameter tuning with cross-validation and fitiing the model on whole
training data. Evaluation is done either on separate test set or on cross-validation.
Arguments:
X_train (array): Training input data.
y_train (Series): Training labels.
X_test (array): Test input data.
y_test (Series): Test labels.
tuning_seed (int): Random State of RandomizedSearch object.
rforest_seed (int): Random State of RandomForestRegressor.
verbose (int): Controls the verbosity of RandomizedSearch objects.
test_set (bool): Whether or not evaluate on a separate test set.
Returns:
(if test_set):
model_test_scores (namedtuple): Results on a test set.
grid.cv_results_ (dict): Full results of parameter tuning with cross-validation.
grid.best_params_ (dict): Parameters of best found model.
(else):
grid.best_score (namedtuple): Best result obtained during CV.
grid.cv_results_ (dict): Full results of parameter tuning with cross-validation.
grid.best_params_ (dict): Parameters of best found model.
"""
# Set elements of the pipeline, i.e. scaler and estimator
scaler = StandardScaler()
estimator = RandomForestRegressor(random_state=rforest_seed, n_jobs=self.rforest_jobs)
# Create pipeline
main_pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Setup RandomizedSearchObject
grid = model_selection.RandomizedSearchCV(main_pipeline, param_distributions=self.param_grid,
n_iter=self.n_combinations, scoring=self.scoring,
cv=self.kfolds,
random_state=tuning_seed,
n_jobs=self.tuning_jobs,
verbose=verbose,
pre_dispatch='2*n_jobs',
refit=False)
# Fit the grid
grid.fit(X_train, y_train)
# Parse best params to pipeline
main_pipeline.set_params(**grid.best_params_)
main_pipeline.named_steps["estimator"].n_jobs = self.rforest_refit_jobs
if test_set:
# Refit on whole training data
main_pipeline.fit(X_train, y_train)
# The grid was created with refit=False, so predict with the refitted pipeline
pred = main_pipeline.predict(X_test)
# Record results as named tuple
ModelTestScores = collections.namedtuple("ModelTestScores", ["cv_best_RMSE", "test_RMSE",
"test_explained_variance", "test_correlation"])
model_test_scores = ModelTestScores((-grid.best_score_) ** 0.5,
metrics.mean_squared_error(y_test, pred) ** 0.5,
metrics.explained_variance_score(y_test, pred),
pearsonr(y_test, pred))
return model_test_scores, grid.cv_results_, grid.best_params_
else:
return (-grid.best_score_) ** 0.5, grid.cv_results_, grid.best_params_
def evaluate_dummy_model_single_split(self, X_train, X_test, y_train, y_test):
"""Fit and evaluate the performance of dummy model, only on the test set."""
# Set up and fit Dummy Regressor
dummy = DummyRegressor()
dummy.fit(X_train, y_train)
# Get dummy predictions on the test set
dummy_preds = dummy.predict(X_test)
# Performance of dummy model as namedtuple
DummyScores = collections.namedtuple("DummyScores", ["test_RMSE", "test_explained_variance",
"test_correlation"])
dummy_performance = DummyScores(metrics.mean_squared_error(y_test, dummy_preds) ** 0.5,
metrics.explained_variance_score(y_test, dummy_preds),
pearsonr(y_test, dummy_preds))
return dummy_performance
def enet_model_over_data_splits(self, X, y, verbose=0, log=False):
"""Perform full modeling over data splits, using ElasticNet. Modeling involves hyperparameter
tuning with cross-validation, training on whole training data, and evaluating on the test set.
This process is repeated for few data splits (random states of data splits are contained
in self.split_seeds).
Arguments:
X (array): Input data (whole, not just the test set).
y (Series): Labels.
verbose (int): Controls the verbosity of RandomizedSearch object.
log (bool): Controls information display.
Returns:
results_for_splits (dict): Dictionary with data split seeds as keys and namedtuples
with results as values.
cv_results_for_splits (dict): Dictionary with data split seeds as keys and full
results of CV as values.
best_parameters_for_splits (dict): Dictionary with data split seeds as keys and
parameters of best found model as values.
dummy_for_splits (dict): Dictionary with data split seeds as keys and results
of dummy model as values.
tuning_seeds_for_splits (dict): Dictionary with data split seeds as keys and random
states of RandomizedSearch objects as values.
"""
# Initialize dictionary for storage of test results for given split
results_for_splits = {}
# Initialize dictionary with full results of cross-validation
cv_results_for_splits = {}
# Initialize dictionary with best parameters
best_parameters_for_splits = {}
# Initialize dictionary with dummy performance
dummy_for_splits = {}
# Initialize dictionary for tuning seed storage
tuning_seeds_for_splits = {}
# Initialize list of tuning seeds
tuning_seeds = np.random.randint(0, 101, size=len(self.split_seeds))
# Iterate over split seeds
for i in range(len(self.split_seeds)):
if log:
print("Modeling for {} out of {} data splits".format(i + 1, len(self.split_seeds)))
print()
# Split data into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=self.test_size, random_state=self.split_seeds[i])
# Record tuning seed
tuning_seeds_for_splits[self.split_seeds[i]] = tuning_seeds[i]
# Evaluate dummy
dummy_for_splits[self.split_seeds[i]] = self.evaluate_dummy_model_single_split(
X_train, X_test, y_train, y_test)
# Fit the model
grid = self.enet_fit_single_drug(X_train, y_train, tuning_seed=tuning_seeds[i],
enet_seed=self.estimator_seeds[i], verbose=verbose)
# Evaluate the model
model_test_scores, cv_results, best_parameters = self.evaluate_single_drug(grid, X_test, y_test)
# Record the results
results_for_splits[self.split_seeds[i]] = model_test_scores
cv_results_for_splits[self.split_seeds[i]] = cv_results
best_parameters_for_splits[self.split_seeds[i]] = best_parameters
if log:
print("Modeling done for all splits")
return (results_for_splits, cv_results_for_splits, best_parameters_for_splits,
dummy_for_splits, tuning_seeds_for_splits)
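# Illustrative end-to-end sketch for one drug; X and y are hypothetical arrays extracted from
# drug.full_data, and enet_param_grid is the hypothetical grid sketched above:
#
#     modeling = Modeling(name="ElasticNet baseline", param_grid=enet_param_grid,
#                         estimator_seeds=[1, 2, 3], split_seeds=[10, 20, 30],
#                         n_combinations=10, kfolds=5)
#     (results, cv_results, best_params,
#      dummy_results, tuning_seeds) = modeling.enet_model_over_data_splits(X, y, log=True)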
def rf_model_over_data_splits(self, X, y, verbose=0, log=False):
"""Perform full modeling over data splits, using RandomForest. Modeling involves hyperparameter
tuning with cross-validation, training on whole training data, and evaluating on the test set.
This process is repeated for few data splits (random states of data splits are contained
in self.split_seeds).
Arguments:
X (array): Input data (whole, not just the test set).
y (Series): Labels.
verbose (int): Controls the verbosity of RandomizedSearch object.
log (bool): Controls information display.
Returns:
results_for_splits (dict): Dictionary with data split seeds as keys and namedtuples
with results as values.
cv_results_for_splits (dict): Dictionary with data split seeds as keys and full
results of CV as values.
best_parameters_for_splits (dict): Dictionary with data split seeds as keys and
parameters of best found model as values.
dummy_for_splits (dict): Dictionary with data split seeds as keys and results
of dummy model as values.
tuning_seeds_for_splits (dict): Dictionary with data split seeds as keys and random
states of RandomizedSearch objects as values.
"""
# Initialize dictionary for storage of test results for given split
results_for_splits = {}
# Initialize dictionary with full results of cross-validation
cv_results_for_splits = {}
# Initialize dictionary with best parameters
best_parameters_for_splits = {}
# Initialize dictionary with dummy performance
dummy_for_splits = {}
# Initialize dictionary for tuning seed storage
tuning_seeds_for_splits = {}
# Initialize list of tuning seeds
tuning_seeds = np.random.randint(0, 101, size=len(self.split_seeds))
# Iterate over split seeds
for i in range(len(self.split_seeds)):
if log:
print("Modeling for {} out of {} data splits".format(i + 1, len(self.split_seeds)))
print()
# Split data into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=self.test_size, random_state=self.split_seeds[i])
# Record tuning seed
tuning_seeds_for_splits[self.split_seeds[i]] = tuning_seeds[i]
# Evaluate dummy
dummy_for_splits[self.split_seeds[i]] = self.evaluate_dummy_model_single_split(
X_train, X_test, y_train, y_test)
# Fit the model
grid = self.rf_fit_single_drug(X_train, y_train, tuning_seed=tuning_seeds[i],
rforest_seed=self.estimator_seeds[i], verbose=verbose)
# Evaluate the model
model_test_scores, cv_results, best_parameters = self.evaluate_single_drug(grid, X_test, y_test)
# Record the results
results_for_splits[self.split_seeds[i]] = model_test_scores
cv_results_for_splits[self.split_seeds[i]] = cv_results
best_parameters_for_splits[self.split_seeds[i]] = best_parameters
if log:
print("Modeling done for all splits")
return (results_for_splits, cv_results_for_splits, best_parameters_for_splits,
dummy_for_splits, tuning_seeds_for_splits)
@staticmethod
def selectKImportance(X, sorted_importance_indices, k):
"""Reduce input data to k important features.
Arguments:
X (array): Input data.
sorted_importance_indices (1D array): Array with sorted indices corresponding to
features, sorting is based on importance of features.
k (int): Number of features to choose.
"""
return X.iloc[:,sorted_importance_indices[:k]]
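# Illustrative sketch: given feature importances from a fitted forest, keep the top 50 columns.
# "importances" and the DataFrame "X" are hypothetical here:
#
#     sorted_idx = np.argsort(importances)[::-1]
#     X_top50 = Modeling.selectKImportance(X, sorted_idx, k=50)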
###############################################################################################
# ModelingWithFeatureSelection class
###############################################################################################
class ModelingWithFeatureSelection(Modeling):
"""Class designed to perform ML modeling with feature selection methods, inherits from Modeling.
"""
def __init__(self, name, param_grid, estimator_seeds, split_seeds, n_combinations=20,
kfolds=5, n_combinations_importances=30, kfolds_importances=10,
rforest_jobs=None, rforest_refit_jobs=None, tuning_jobs=None,
ss_lambda_grid=[0.0001, 0.001, 0.01], ss_n_bootstrap_iterations=100,
ss_threshold=0.6, ss_n_jobs=1, max_iter=1000,
scoring="neg_mean_squared_error", test_size=0.3):
""" ModelingWithFeatureSelection initializer.
Arguments:
name (string): Name describing the experiment.
param_grid (dict): Grid of parameters to search on during hyperparameter tuning.
estimator_seeds (list of ints): Random States for the predictive algorithms used.
split_seeds (list of ints): Seeds for data splits.
n_combinations (int): Number of parameters to try during hyperparameter tuning.
kfolds (int): Number of folds of cross validation during hyperparameter tuning.
n_combinations_importances (int): Number of parameters to try during extracting the
feature importances.
kfolds_importances (int): Number of folds of cross validation during extracting the
feature importances.
rforest_jobs (int): Number of cores to use during fitting and predicting with
Random Forest.
rforest_refit_jobs (int): Number of cores to use during refitting RandomForest
after cross-validation hyperparameter tuning.
tuning_jobs (int): Number of cores to use during cross-validation.
ss_lambda_grid (dict): Lambdas to iterate over for StabilitySelection.
ss_n_bootstrap_iterations (int): Number of iterations for StabilitySelection.
ss_threshold (float): Threshold to use for features stability scores.
ss_n_jobs (int): Number of cores to use for StabilitySelection.
scoring (string): Function to optimize during parameter tuning.
test_size (float): Fraction of whole data spent on test set.
Returns:
ModelingWithFeatureSelection object.
"""
# Call parent class initializer
super().__init__(name, param_grid, estimator_seeds, split_seeds, n_combinations, kfolds,
rforest_jobs, rforest_refit_jobs, max_iter, tuning_jobs, scoring, test_size)
# Random Forest specific attributes
self.n_combinations_importances = n_combinations_importances
self.kfolds_importances = kfolds_importances
# ElasticNet / StabilitySelection specific attributes
self.ss_lambda_grid = ss_lambda_grid
self.ss_n_bootstrap_iterations = ss_n_bootstrap_iterations
self.ss_threshold = ss_threshold
self.ss_n_jobs = ss_n_jobs
def enet_feat_importances_single_drug(self, X, y, lasso_seed=None, ss_seed=None, verbose=0):
"""Extract feature importances using StabilitySelection with ElasticNet.
Arguments:
X (DataFrame): Training input data.
y (Series): Training response variable.
lasso_seed (int): Random State of Lasso model.
ss_seed (int): Random State for StabilitySelection.
verbose (int): Controls verbosity of StabilitySelection.
Returns:
selector: Fitted StabilitySelection object.
"""
# Set up elements of modeling pipeline, i.e. scaler and estimator
scaler = StandardScaler()
estimator = Lasso(random_state=lasso_seed, max_iter=self.max_iter)
# Create the pipeline
pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Setup StabilitySelection object
selector = StabilitySelection(base_estimator=pipeline,
lambda_name="estimator__alpha",
lambda_grid=self.ss_lambda_grid,
n_bootstrap_iterations=self.ss_n_bootstrap_iterations,
threshold=self.ss_threshold,
verbose=verbose,
n_jobs=self.ss_n_jobs,
random_state=ss_seed)
selector.fit(X, y)
return selector
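# Illustrative sketch (hypothetical training data and a hypothetical ModelingWithFeatureSelection
# instance "modeling_fs"): after fitting, the selector's stability scores can be thresholded to
# reduce the feature set:
#
#     selector = modeling_fs.enet_feat_importances_single_drug(X_train, y_train, lasso_seed=1, ss_seed=1)
#     X_train_reduced = selector.transform(X_train, threshold=0.7)
#     print(X_train.shape[1], "->", X_train_reduced.shape[1], "features")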
def enet_fit_grid_range_of_feats(self, X_train, y_train,
selector, stability_scores_thresholds,
tuning_seed=None,
enet_seed=None, verbose=0, log=False):
"""Perform modeling with ElasticNet for different feature numbers.
Iterate over thresholds of stability scores. For every threshold, perform hyperparameter
tuning and evaluate using same training data (using cross-validation score).
Arguments:
X_train (DataFrame): Training input data.
y_train (Series): Training response variable.
selector (StabilitySelection): Fitted StabilitySelection object.
stability_scores_thresholds (list of floats): Stability scores determining
which features to include.
tuning_seed (int): Random State for parameter tuning.
enet_seed (int): Random State of ElasticNet model.
verbose (int): Controls verbosity of RandomizedSearchCV.
log (bool): Controls output during running.
Returns:
results (dict): Dictionary with feature numbers k as keys and CV results as values.
best_parameters (dict): Dictionary with feature numbers k as keys and parameters
of best found model as values.
"""
# Initialize dictionary for storage of results
results = {}
# Initialize dictionary for storage of best parameters
best_parameters = {}
# Iterate over stability thresholds
for stability_thresh in stability_scores_thresholds:
# Extract corresponding data with reduced features
X_train_k_feats = selector.transform(X_train, threshold=stability_thresh)
k = X_train_k_feats.shape[1]
if k > 0: # If k is 0 (no features pass the threshold), do not perform modeling at all
# Train and evaluate during cross-validation
grid = self.enet_fit_single_drug(X_train_k_feats, y_train, tuning_seed=tuning_seed,
enet_seed=enet_seed, verbose=verbose, refit=False)
cv_best_score, cv_full_results, grid_best_params = self.evaluate_single_drug(
grid, X_train, y_train)
# Record the results for k features and threshold
results[(k, stability_thresh)] = cv_best_score
best_parameters[(k, stability_thresh)] = grid_best_params
if log:
print("Done modeling with threshold {} and {} features".format(
stability_thresh, k))
return results, best_parameters
def enet_get_best_k(self, X, y, stability_scores_thresholds,
verbose_importances=0,
verbose_kfeats=0, log=False):
"""Perform modeling over range of features k for few data splits in order to obtain best
feature number k and stability threshold for particular drug.
Arguments:
X (DataFrame): Whole input data.
y (Series): Whole response variable.
stability_scores_thresholds (list of floats): Stability scores determining
which features to include.
verbose_importances (int): Controls verbosity of RandomizedSearchCV during feature
importances extraction.
verbose_kfeats (int): Controls verbosity of RandomizedSearchCV during modeling with
different numbers of features.
log (bool): Controls output during running.
Returns:
results_for_splits (dict): Nested dictionary with split seeds as keys, and dictionaries
with feature numbers as keys and results as values.
best_parameters_for_splits (dict): Dictionary with split seeds as keys and dictionaries
with feature numbers as keys and best parameters as values.
selectors_for_splits (dict): Dictionary with split seeds as keys and fitted
StabilitySelection objects as values.
tuning_seeds_for_splits (dict): Dictionary with split seeds as keys and random states
of RandomizedSearch as values.
"""
# Initialize dictionary for results over split seeds s and feature numbers k
results_for_splits = {}
# Initialize dictionary with selectors per split
selectors_for_splits = {}
# Initialize dictionary with best parameters over s and k
best_parameters_for_splits = {}
# Initialize dictionary for tuning seed per data split
tuning_seeds_for_splits = {}
# Initialize list of tuning seeds
tuning_seeds = np.random.randint(0, 101, size=len(self.split_seeds))
# Iterate over data splits
for i in range(len(self.split_seeds)):
if log:
print()
print("Modeling for {} out of {} data splits".format(i + 1, len(self.split_seeds)))
print()
# Split data into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=self.test_size, random_state=self.split_seeds[i])
# Fit selector in order to get feature importances
selector = self.enet_feat_importances_single_drug(
X_train, y_train,
lasso_seed=self.estimator_seeds[i],
ss_seed=self.estimator_seeds[i],
verbose=verbose_importances)
# Record tuning seed
tuning_seeds_for_splits[self.split_seeds[i]] = tuning_seeds[i]
# Record selector
selectors_for_splits[self.split_seeds[i]] = selector
# Perform modeling and evaluation on range of features
results_range_of_feats, best_parameters_for_ks = self.enet_fit_grid_range_of_feats(
X_train, y_train,
selector,
stability_scores_thresholds,
tuning_seed=tuning_seeds[i],
enet_seed=self.estimator_seeds[i],
verbose=verbose_kfeats,
log=log)
# Perform modeling for all features
grid = self.enet_fit_single_drug(X_train, y_train, tuning_seed=tuning_seeds[i],
enet_seed=self.estimator_seeds[i], verbose=verbose_kfeats, refit=False)
cv_best_score, cv_full_results, grid_best_params = self.evaluate_single_drug(
grid, X_train, y_train)
k = X_train.shape[1]
if log:
print("Done modeling with threshold {} and {} features".format(0.0, k))
# Add results for all features into dictionary
results_range_of_feats[(k, 0.0)] = cv_best_score
best_parameters_for_ks[(k, 0.0)] = grid_best_params
# Record best parameters for ks
best_parameters_for_splits[self.split_seeds[i]] = best_parameters_for_ks
# Record results for this split
results_for_splits[self.split_seeds[i]] = results_range_of_feats
if log:
print("Modeling done for all splits")
return (results_for_splits, best_parameters_for_splits,
selectors_for_splits, tuning_seeds_for_splits)
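# Illustrative sketch (added for clarity, not part of the original pipeline): assuming
# `modeling` is an already-configured ModelingWithFeatureSelection instance and X, y hold
# the expression features and AUC response for a single drug, the threshold search above
# would typically be driven like this (variable names and threshold values are placeholders):
#   thresholds = [0.9, 0.8, 0.7, 0.6]
#   results, best_params, selectors, seeds = modeling.enet_get_best_k(
#       X, y, stability_scores_thresholds=thresholds, log=True)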
def enet_model_over_data_splits(self, X, y, stability_scores_thresholds,
verbose_importances=0,
verbose_kfeats=0, log=False):
"""Perform full modeling over data splits. For a single data split, modeling consist of:
1) Extraction of feature importances using whole training data (via
StabilitySelection selector).
2) Hyperparamater tuning and evaluation using just training data
for range of feature importance scores.
After that, best threshold is determined. Then, for the same data splits, model is trained
on whole training set with best found parameters and thresh. After that, model is evaluated
on the test set.
Arguments:
X (DataFrame): Whole input data.
y (Series): Whole response variable.
stability_scores_thresholds (list of floats): Stability scores determining
which features to include.
verbose_importances (int): Controls verbosity of RandomizedSearchCV during feature
importances extraction.
verbose_kfeats (int): Controls verbosity of RandomizedSearchCV during modeling with
different numbers of features.
log (bool): Controls output during running.
Returns:
test_results_for_splits (dict): Dictionary with split seeds as keys and final results on
a test set (in form of namedtuple) as values.
dummy_for_splits (dict): Dictionary with split seeds as keys and results of dummy model
as values.
selectors_for_splits (dict): Dictionary with split seeds as keys and fitted
StabilitySelection objects as values.
best_parameters_for_splits (dict): Dictionary with split seeds as keys and dictionaries
with feature numbers as keys and best parameters as values.
tuning_seeds_for_splits (dict): Dictionary with split seeds as keys and random states
of RandomizedSearch as values.
best_k (int): Best found feature number k.
"""
# Initialize dictionary for storage of test results for given split
test_results_for_splits = {}
# Initialize dictionary with dummy performance
dummy_for_splits = {}
# Get results across splits s and feature numbers k
(results_for_splits, best_parameters_for_splits,
selectors_for_splits, tuning_seeds_for_splits) = self.enet_get_best_k(X, y, stability_scores_thresholds,
verbose_importances,
verbose_kfeats, log)
# Determine the best threshold
# First, create a new result dictionary with only stability thresholds as keys
new_results_for_splits = {}
for s in results_for_splits:
results_over_feats = {}
for tup in results_for_splits[s]:
results_over_feats[tup[1]] = results_for_splits[s][tup]
new_results_for_splits[s] = results_over_feats
# Then, average results for particular threshold over data splits
average_results_per_thresh = []
for thresh in stability_scores_thresholds:
# A threshold has to be present for all splits to be considered
present_in_all = True
for s in self.split_seeds:
if thresh not in new_results_for_splits[s]:
present_in_all = False
break
if present_in_all:
sum_of_results = 0
for s in new_results_for_splits.keys():
sum_of_results += new_results_for_splits[s][thresh]
average_result = sum_of_results / len(new_results_for_splits)
average_results_per_thresh.append((thresh, average_result))
# Get the best threshold
best_threshold = min(average_results_per_thresh, key=lambda x: x[1])[0]
# Perform the modeling for best found threshold
# Iterate over data splits (again, same splits as during finding best k)
for i in range(len(results_for_splits)):
# Split data into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=self.test_size, random_state=self.split_seeds[i])
# Evaluate dummy
dummy_for_splits[self.split_seeds[i]] = self.evaluate_dummy_model_single_split(
X_train, X_test, y_train, y_test)
# Get data for best threshold
# First, get appropriate selector
sel = selectors_for_splits[self.split_seeds[i]]
# Extract corresponding data
X_train_kfeats = sel.transform(X_train, threshold=best_threshold)
X_test_kfeats = sel.transform(X_test, threshold=best_threshold)
# No need for parameter tuning because we have best parameters
# Set elements of the pipeline, i.e. scaler and estimator
scaler = StandardScaler()
estimator = ElasticNet(random_state=self.estimator_seeds[i], max_iter=self.max_iter)
# Create pipeline
main_pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Find the parameter entry corresponding to the best threshold
for tup in best_parameters_for_splits[self.split_seeds[i]]:
if tup[1] == best_threshold:
best_tup = tup
break
# Set the best found parameters on the pipeline
main_pipeline.set_params(**best_parameters_for_splits[self.split_seeds[i]][best_tup])
# Fit model on whole training data
main_pipeline.fit(X_train_kfeats, y_train)
# Evaluate the model and record the results
pred = main_pipeline.predict(X_test_kfeats)
# Record results in corresponding Experiment fields, mostly as named tuples
# Classification performance
ModelTestScores = collections.namedtuple("ModelTestScores", ["test_RMSE",
"test_explained_variance", "test_correlation"])
model_test_scores = ModelTestScores(metrics.mean_squared_error(y_test, pred) ** 0.5,
metrics.explained_variance_score(y_test, pred),
pearsonr(y_test, pred))
# Record the best results
test_results_for_splits[self.split_seeds[i]] = model_test_scores
return (test_results_for_splits, dummy_for_splits, selectors_for_splits,
best_parameters_for_splits,
tuning_seeds_for_splits, best_threshold)
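# Illustrative sketch (assumed usage): the full ElasticNet workflow for one drug, returning
# per-split test scores, dummy baselines, fitted selectors, best parameters, tuning seeds
# and the best stability threshold. `modeling`, X and y are placeholders.
#   (test_res, dummy_res, selectors, best_params,
#    tuning_seeds, best_thr) = modeling.enet_model_over_data_splits(
#       X, y, stability_scores_thresholds=[0.9, 0.8, 0.7], log=True)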
def rf_feat_importances_single_drug(self, X, y, rforest_seed=None, tuning_seed=None, verbose=0):
"""Extract feature importances using Random Forest.
First, hyperparameter tuning is performed using k-fold cross-validation (sklearn's RandomizedSearchCV).
Then, the model is trained on the whole training data and feature importances are extracted. In order
to use a different number of cores during refitting, the refit is done manually and the "refit"
argument of RandomizedSearchCV is set to False.
Arguments:
X (DataFrame): Training input data.
y (Series): Training response variable.
rforest_seed (int): Random State of Random Forest Model.
tuning_seed (int): Random State for parameter tuning.
verbose (int): Controls verbosity of RandomizedSearchCV.
Returns:
grid: Fitted RandomizedSearchCV object.
importances: List of tuples (feature name, importance coefficient).
"""
# Setup elements of the pipeline; scaler and estimator
scaler = StandardScaler()
estimator = RandomForestRegressor(random_state=rforest_seed,
n_jobs=self.rforest_jobs)
# Create pipeline
main_pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Setup RandomizedSearch
grid = model_selection.RandomizedSearchCV(main_pipeline,
param_distributions=self.param_grid,
n_iter=self.n_combinations_importances,
scoring=self.scoring,
cv=self.kfolds_importances,
random_state=tuning_seed,
verbose=verbose,
n_jobs=self.tuning_jobs,
pre_dispatch='2*n_jobs',
refit=False)
# Fit the grid
grid.fit(X, y)
# Parse best params to pipeline
main_pipeline.set_params(**grid.best_params_)
# Set the number of cores during refitting
main_pipeline.named_steps["estimator"].n_jobs = self.rforest_refit_jobs
# Refit the model
main_pipeline.fit(X, y)
# Get the feature importances along with feature names
clf = main_pipeline.named_steps["estimator"]
importances = [x for x in zip(X.columns, clf.feature_importances_)]
return grid, importances
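# Illustrative sketch (assumed usage): extracting and ranking Random Forest importances for
# one training split; `modeling`, X_train and y_train are placeholders.
#   grid, importances = modeling.rf_feat_importances_single_drug(
#       X_train, y_train, rforest_seed=42, tuning_seed=0)
#   top10 = sorted(importances, key=lambda t: t[1], reverse=True)[:10]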
def rf_fit_grid_range_of_feats(self, X_train, y_train,
sorted_importance_indices,
feature_numbers, tuning_seed=None,
rforest_seed=None, verbose=0, log=False):
"""Perform modeling for a given range of number of input features.
Iterate over numbers of features k. For every k, perform parameter tuning and evaluate
using cross-validation on training data.
Arguments:
X_train (DataFrame): Training input data.
y_train (Series): Training response variable.
sorted_importance_indices (array): Sorted indexes corresponding to specific features.
feature_numbers (list of ints): Feature numbers k for which do the modeling.
tuning_seed (int): Random State for parameter tuning.
rforest_seed (int): Random State of Random Forest Model.
verbose (int): Controls verbosity of RandomizedSearchCV.
log (bool): Controls output during running.
Returns:
results (dict): Dictionary with feature numbers k as keys and CV results as values.
best_parameters (dict): Dictionary with feature numbers k as keys and parameters
of best found model as values.
"""
# Initialize dictionary for storage of results
results = {}
# Initialize dictionary for storage of best parameters
best_parameters = {}
# Iterate over number of features
for k in feature_numbers:
X_train_kfeats = self.selectKImportance(X_train, sorted_importance_indices, k)
# Train and evaluate
cv_performance, _, best_params = self.fit_and_evaluate(X_train_kfeats, y_train, X_train_kfeats, y_train,
tuning_seed=tuning_seed, rforest_seed=rforest_seed,
verbose=verbose, test_set=False)
# Record the results for k features
results[k] = cv_performance
best_parameters[k] = best_params
if log:
print("Done modeling with {} features".format(k))
return results, best_parameters
def rf_get_best_k(self, X, y, feature_numbers,
verbose_importances=0,
verbose_kfeats=0, log=False):
"""Perform modeling over range of features k for few data splits in order to obtain best
feature number k for particular drug.
Arguments:
X (DataFrame): Whole input data.
y (Series): Whole response variable.
feature_numbers (list of ints): Feature numbers k for which do the modeling.
verbose_importances (int): Controls verbosity of RandomizedSearchCV during feature
importances extraction.
verbose_kfeats (int): Controls verbosity of RandomizedSearchCV during modeling with
different numbers of features.
log (bool): Controls output during running.
Returns:
results_for_splits (dict): Nested dictionary with split seeds as keys, and dictionaries
with feature numbers as keys and results as values.
best_parameters_for_splits (dict): Dictionary with split seeds as keys and dictionaries
with feature numbers as keys and best parameters as values.
importances_for_splits (dict): Dictionary with split seeds as keys and lists of feature
importances as values.
tuning_seeds_for_splits (dict): Dictionary with split seeds as keys and random states
of RandomizedSearch as values.
"""
# Initialize dictionary for results over split seeds s and feature numbers k
results_for_splits = {}
# Initialize dictionary with feature importances per data split
importances_for_splits = {}
# Initialize dictionary with best parameters over s and k
best_parameters_for_splits = {}
# Initialize dictionary for tuning seed per data split
tuning_seeds_for_splits = {}
# Initialize list of tuning seeds
tuning_seeds = np.random.randint(0, 101, size=len(self.split_seeds))
# Iterate over data splits
for i in range(len(self.split_seeds)):
if log:
print("Modeling for {} out of {} data splits".format(i + 1, len(self.split_seeds)))
print()
# Split data into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=self.test_size, random_state=self.split_seeds[i])
# Get vector with feature importances
grid, feat_importances = self.rf_feat_importances_single_drug(X_train, y_train,
tuning_seed=tuning_seeds[i],
rforest_seed=self.estimator_seeds[i], verbose=verbose_importances)
# Record tuning seed
tuning_seeds_for_splits[self.split_seeds[i]] = tuning_seeds[i]
# Record feature importances
importances_for_splits[self.split_seeds[i]] = feat_importances
# Extract just the importance coefficients
importances = np.array([x[1] for x in feat_importances])
# Evaluate performance for m (all features)
result_m_features, _, _ = self.evaluate_single_drug(grid, X_test, y_test)
# Perform modeling and evaluation on range of features
sorted_importance_indices = importances.argsort()[::-1]
results_range_of_feats, best_parameters_for_ks = self.rf_fit_grid_range_of_feats(X_train, y_train,
sorted_importance_indices,
feature_numbers,
tuning_seed=tuning_seeds[i],
rforest_seed=self.estimator_seeds[i],
verbose=verbose_kfeats,
log=log)
# Record best parameters for ks
best_parameters_for_splits[self.split_seeds[i]] = best_parameters_for_ks
# Add results for all features
results_range_of_feats[len(feat_importances)] = result_m_features
# Record results for this split
results_for_splits[self.split_seeds[i]] = results_range_of_feats
if log:
print("Modeling done for all splits")
return (results_for_splits, best_parameters_for_splits,
importances_for_splits, tuning_seeds_for_splits)
def rf_model_over_data_splits(self, X, y, feature_numbers,
verbose_importances=0,
verbose_kfeats=0, log=False):
"""Perform full modeling over data splits. For a single data split, modeling consist of:
1) Extraction of feature importances using whole training data.
2) Hyperparamater tuning and evaluation using just training data
for range of feature numbers k.
After that, best k is determined. Then, for the same data splits model is trained
on whole training set with best found parameters and k. After that, model is evaluated
on the test set.
Arguments:
X (DataFrame): Whole input data.
y (Series): Whole response variable.
feature_numbers (list of ints): Feature numbers k for which do the modeling.
verbose_importances (int): Controls verbosity of RandomizedSearchCV during feature
importances extraction.
verbose_kfeats (int): Controls verbosity of RandomizedSearchCV during modeling with
different numbers of features.
log (bool): Controls output during running.
Returns:
test_results_for_splits (dict): Dictionary with split seeds as keys and final results on
a test set (in form of namedtuple) as values.
dummy_for_splits (dict): Dictionary with split seeds as keys and results of dummy model
as values.
importances_for_splits (dict): Dictionary with split seeds as keys and feature importances
as values.
best_parameters_for_splits (dict): Dictionary with split seeds as keys and dictionaries
with feature numbers as keys and best parameters as values.
tuning_seeds_for_splits (dict): Dictionary with split seeds as keys and random states
of RandomizedSearch as values.
best_k (int): Best found feature number k.
"""
# Initialize dictionary for storage of test results for given split
test_results_for_splits = {}
# Initialize dictionary with dummy performance
dummy_for_splits = {}
# Get results across splits s and feature numbers k
(results_for_splits, best_parameters_for_splits,
importances_for_splits, tuning_seeds_for_splits) = self.rf_get_best_k(X, y, feature_numbers,
verbose_importances,
verbose_kfeats, log)
# Determine the best k
# First, average results for particular k over data splits
average_results_per_k = []
for k in feature_numbers:
sum_of_results = 0
for s in results_for_splits.keys():
sum_of_results += results_for_splits[s][k]
average_result = sum_of_results / len(results_for_splits)
average_results_per_k.append((k, average_result))
# Get the best k (the one with the lowest average error)
best_k = min(average_results_per_k, key=lambda x: x[1])[0]
# Perform the modeling for best found k
# Iterate over data splits (again, same splits as during finding best k)
for i in range(len(results_for_splits)):
# Split data into training and test set
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=self.test_size, random_state=self.split_seeds[i])
# Evaluate dummy
dummy_for_splits[self.split_seeds[i]] = self.evaluate_dummy_model_single_split(
X_train, X_test, y_train, y_test)
# Get data for best_k features
importances = np.array([x[1] for x in importances_for_splits[self.split_seeds[i]]])
sorted_importance_indices = importances.argsort()[::-1]
X_train_kfeats = self.selectKImportance(X_train, sorted_importance_indices, best_k)
X_test_kfeats = self.selectKImportance(X_test, sorted_importance_indices, best_k)
# No need for parameter tuning because we have best parameters
# Set elements of the pipeline, i.e. scaler and estimator
scaler = StandardScaler()
estimator = RandomForestRegressor(random_state=self.estimator_seeds[i], n_jobs=self.rforest_refit_jobs)
# Create pipeline
main_pipeline = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# No need for parameter tuning because we already have the best parameters
# Set the best found parameters on the pipeline
main_pipeline.set_params(**best_parameters_for_splits[self.split_seeds[i]][best_k])
# Fit model on whole training data
main_pipeline.fit(X_train_kfeats, y_train)
# Evaluate the model and record the results
pred = main_pipeline.predict(X_test_kfeats)
# Record results in corresponding Experiment fields, mostly as named tuples
# Regression performance on the test set
ModelTestScores = collections.namedtuple("ModelTestScores", ["test_RMSE",
"test_explained_variance", "test_correlation"])
model_test_scores = ModelTestScores(metrics.mean_squared_error(y_test, pred) ** 0.5,
metrics.explained_variance_score(y_test, pred),
pearsonr(y_test, pred))
# Record the best results
test_results_for_splits[self.split_seeds[i]] = model_test_scores
return (test_results_for_splits, dummy_for_splits, importances_for_splits,
best_parameters_for_splits,
tuning_seeds_for_splits, best_k)
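# Illustrative sketch (assumed usage): the full Random Forest workflow for one drug over a
# grid of feature numbers; `modeling`, X, y and the feature counts are placeholders.
#   (test_res, dummy_res, importances, best_params,
#    tuning_seeds, best_k) = modeling.rf_model_over_data_splits(
#       X, y, feature_numbers=[10, 50, 100, 500], log=True)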
#################################################################################################
# ModelingResults class
#################################################################################################
class ModelingResults(Modeling):
def __init__(self, parent):
if isinstance(parent, ModelingWithFeatureSelection):
super().__init__(parent.name, parent.param_grid, parent.estimator_seeds,
parent.split_seeds, parent.n_combinations,
parent.kfolds, parent.rforest_jobs,
parent.tuning_jobs, parent.scoring, parent.test_size)
# RandomForest specific attributes
self.n_combinations_importances = parent.n_combinations_importances
self.kfolds_importances = parent.kfolds_importances
# ElasticNet / StabilitySelection specific attributes
self.ss_lambda_grid = parent.ss_lambda_grid
self.ss_n_bootstrap_iterations = parent.ss_n_bootstrap_iterations
self.ss_threshold = parent.ss_threshold
self.ss_n_jobs = parent.ss_n_jobs
# Create fields for storage of results
# All fields are dictionaries with drugs as keys and, as values, dictionaries
# keyed by split seed
self.performance_dict = {}
self.dummy_performance_dict = {}
self.importances_dict = {}
self.best_params_dict = {}
self.tuning_seeds_dict = {}
self.best_k_dict = {}
else:
super().__init__(parent.name, parent.param_grid, parent.estimator_seeds,
parent.split_seeds, parent.n_combinations,
parent.kfolds, parent.rforest_jobs,
parent.rforest_refit_jobs, parent.max_iter,
parent.tuning_jobs, parent.scoring, parent.test_size)
# Create fields for storage of results
# All fields are dictionaries with drugs as keys and, as values, dictionaries
# keyed by split seed
self.performance_dict = {}
self.dummy_performance_dict = {}
self.best_params_dict = {}
self.tuning_seeds_dict = {}
self.data_shapes = {}
self.cv_results_dict = {}
def create_raw_results_df(self, drug_annotations_df=None, remove_duplicates=True):
"""Put results storage in dictonaries into one pandas DataFrame.
Each row in the raw DataFrame will consist of drug, data split seed, and results.
The DataFrame stores just the raw results coming from dictonaries.
Arguments:
drug_annotations_df (DataFrame): list of drugs from GDSC. If parsed, it is used to
extract compounds target pathways.
Returns:
df (DataFrame): DataFrame with raw experiment results.
"""
# Initialize future columns
drug_names = []
drug_ids = []
split_seeds_columns = []
test_rmses = []
test_correlations = []
corr_pvals = []
dummy_rmses = []
# If modeling was with feature selection, add a column with best feature number k
if hasattr(self, "best_k_dict"):
if len(self.best_k_dict) > 0:
best_ks = []
# If drug_annotations_df is passed, extract target pathway information
if drug_annotations_df is not None:
target_pathways = []
for drug_tuple in self.performance_dict:
test_performance = self.performance_dict[drug_tuple]
dummy_performance = self.dummy_performance_dict[drug_tuple]
if hasattr(self, "best_k_dict"):
if len(self.best_k_dict) > 0:
best_k = self.best_k_dict[drug_tuple]
# Check if modeling was with Stability Selection
if best_k <= 1.0:
corresponding_ks = []
for split_seed in self.best_params_dict[drug_tuple]:
for k, thresh in self.best_params_dict[drug_tuple][split_seed].keys():
if thresh == best_k:
corresponding_ks.append(k)
best_k = int(np.mean(corresponding_ks))
if drug_annotations_df is not None:
pathway = drug_annotations_df[
drug_annotations_df["DRUG_ID"] == drug_tuple[1]]["TARGET_PATHWAY"].iloc[0]
for split_seed in test_performance.keys():
test_results = test_performance[split_seed]
dummy_results = dummy_performance[split_seed]
# Fill the columns lists
drug_names.append(drug_tuple[0])
drug_ids.append(drug_tuple[1])
split_seeds_columns.append(split_seed)
test_rmses.append(test_results.test_RMSE)
test_correlations.append(test_results.test_correlation[0])
corr_pvals.append(test_results.test_correlation[1])
dummy_rmses.append(dummy_results.test_RMSE)
if hasattr(self, "best_k_dict"):
if len(self.best_k_dict) > 0:
best_ks.append(best_k)
if drug_annotations_df is not None:
target_pathways.append(pathway)
# Put column lists into DataFrame
df = pd.DataFrame()
df.insert(0, "Drug ID", drug_ids)
df.insert(1, "Drug Name", drug_names)
df.insert(2, "Split seed", split_seeds_columns)
df.insert(3, "Model test RMSE", test_rmses)
df.insert(4, "Model test correlation", test_correlations)
df.insert(5, "Correlation pval", corr_pvals)
df.insert(6, "Dummy test RMSE", dummy_rmses)
df.insert(7, "Relative test RMSE", df["Dummy test RMSE"] / df["Model test RMSE"])
if hasattr(self, "best_k_dict"):
if len(self.best_k_dict) > 0:
df.insert(8, "Best k", best_ks)
if drug_annotations_df is not None:
df.insert(8, "Target Pathway", target_pathways)
if remove_duplicates:
filepath = "/media/krzysztof/Nowy/Doktorat - Modelling drug efficacy in cancer/Projects/Results Assesment/Results and other files/drug_ids_to_keep_in_results.pkl"
# Load list with drug ids to keep
with open(filepath, "rb") as f:
ids_to_keep = dill.load(f)
return df[df["Drug ID"].isin(ids_to_keep)]
return df
def create_agg_results_df(self, columns_to_aggregate, metric_to_aggregate, drug_annotations_df=None,
remove_duplicates=True):
"""Compute DataFrame with results grouped by Drug ID and aggregated by mean."""
# First, create the DF with raw results
df = self.create_raw_results_df(drug_annotations_df, remove_duplicates)
# Group by Drug ID
df_grouped = df.groupby("Drug ID", as_index=False)
# Aggregate by chosen metric
agg_dict = {"Drug Name": lambda x: x.iloc[0]}
for metric in columns_to_aggregate:
agg_dict[metric] = metric_to_aggregate
if drug_annotations_df is not None:
agg_dict["Target Pathway"] = lambda x: x.iloc[0]
return df_grouped.agg(agg_dict)
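# Illustrative sketch (assumed usage): aggregating per-split results into one row per drug;
# `results` is a populated ModelingResults instance and `drug_annotations_df` the GDSC drug
# annotation table (both placeholders here).
#   agg_df = results.create_agg_results_df(
#       columns_to_aggregate=["Model test RMSE", "Model test correlation",
#                             "Dummy test RMSE", "Relative test RMSE"],
#       metric_to_aggregate="mean", drug_annotations_df=drug_annotations_df)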
def performance_barplots_single(self, metric,
columns_to_aggregate=["Model test RMSE", "Model test correlation",
"Dummy test RMSE", "Relative test RMSE"],
metric_to_aggregate="mean", drug_annotations_df=None,
figsize=(35, 12), title_size=45, label_size=30,
tick_size=20, width=0.7, grid=True, half=1,
hline_width=2., save_directory=None):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title(metric + " for each drug", fontsize=title_size)
ax.set_xlabel("Drug", fontsize=label_size)
ax.set_ylabel(metric, fontsize=label_size)
# Compute DataFrame with results
df = self.create_agg_results_df(columns_to_aggregate, metric_to_aggregate,
drug_annotations_df)
# Set up DataFrame slicing
if half == 1:
start_idx = 0
end_idx = df.shape[0] // 2
elif half == 2:
start_idx = df.shape[0] // 2
end_idx = df.shape[0]
else:
start_idx = 0
end_idx = df.shape[0]
df.sort_values(metric, ascending = False).iloc[start_idx:end_idx].plot(
x = "Drug Name", y = metric ,kind="bar", ax = ax, width = width,
figsize=figsize, fontsize=tick_size, grid = grid, legend=False)
# If the metric is Relative test RMSE, add a horizontal line at 1,
# representing the baseline
if metric == "Relative test RMSE":
ax.axhline(y = 1.0, linewidth=hline_width, color="black")
plt.tight_layout()
if save_directory:
plt.savefig(save_directory)
plt.show()
def overall_boxplot_single(self, metric,aggregated=True, title="",
columns_to_aggregate=["Model test RMSE", "Model test correlation",
"Dummy test RMSE", "Relative test RMSE"],
metric_to_aggregate="mean", drug_annotations_df=None,
figsize=(8, 6), title_size=25, label_size=20,
tick_size=18, grid=True, save_directory=None):
# If aggregated is True, plot aggregated results
if aggregated:
# Compute DataFrame with aggregated results
df = self.create_agg_results_df(columns_to_aggregate, metric_to_aggregate,
drug_annotations_df)
# Actual plotting
fig = plt.figure(figsize=figsize)
plt.tick_params(labelsize=tick_size)
plt.title(title, fontsize = title_size)
if grid:
plt.grid()
sns.boxplot(x = df[metric], orient="v")
plt.xlabel("", fontsize=label_size)
plt.ylabel(metric, fontsize=label_size)
plt.tight_layout()
if save_directory:
plt.savefig(save_directory)
plt.show()
# Otherwise, plot results for different data splits separately
else:
# Compute DataFrame with raw results
df = self.create_raw_results_df(drug_annotations_df)
# Actual plotting
fig = plt.figure(figsize=figsize)
plt.tick_params(labelsize=tick_size)
plt.title(title, fontsize = title_size)
if grid:
plt.grid()
sns.boxplot(x="Split seed", y=metric, data=df)
plt.xlabel("Data split seed", fontsize=label_size)
plt.ylabel(metric, fontsize=label_size)
plt.tight_layout()
if save_directory:
plt.savefig(save_directory)
plt.show()
def extract_feature_ranks_per_split_ss(self, drug_tuple, split_seed, gene_expression_df):
# Find appropriate selector
selector = self.importances_dict[drug_tuple][split_seed]
# Extract array containing max stability scores for features (should be a vector)
scores_vector = selector.stability_scores_.max(axis=1)
# Create a list with tuples (gene_id, max_score)
feats_with_scores = [x for x in zip(gene_expression_df.ensembl_gene, scores_vector)]
# Create dictionary of the form gene_id: rank
ranks_dict = {}
for rank, feat_tuple in enumerate(sorted(feats_with_scores, key=lambda x: x[1], reverse=True)):
ranks_dict[feat_tuple[0]] = rank + 1
# Create a final list of tuples (gene_id, rank) in the same order as in original gene_expression_df
ranks_list = []
for gene_id in gene_expression_df.ensembl_gene:
ranks_list.append((gene_id, ranks_dict[gene_id]))
return ranks_list
def extract_feature_ranks_aggregated_over_splits_ss(self, drug_tuple, gene_expression_df):
# Initialize array for storage of intermediate results
ranks_vector = np.zeros(gene_expression_df.shape[0])
# Iterate over data splits and add corresponding features ranks
for split_seed in self.importances_dict[drug_tuple]:
features_with_rank = self.extract_feature_ranks_per_split_ss(drug_tuple, split_seed,
gene_expression_df)
ranks_vector = ranks_vector + np.array([x[1] for x in features_with_rank])
# Divide ranks_vector by number of splits to get mean rank
ranks_vector = ranks_vector / len(self.importances_dict[drug_tuple])
# Add gene ids and return the results
return [x for x in zip(gene_expression_df.ensembl_gene, ranks_vector)]
def extract_topk_relevant_features_ss(self, drug_tuple, k, gene_expression_df,
just_gene_ids=True):
# Compute genes along with corresponding aggregated ranks over splits
genes_with_ranks = self.extract_feature_ranks_aggregated_over_splits_ss(drug_tuple,
gene_expression_df)
# If just_gene_ids is True, return only IDs corresponding to the k
# most relevant genes
if just_gene_ids:
return [x[0] for x in sorted(genes_with_ranks, key=lambda x: x[1])[:k]]
# Otherwise, return gene IDs along with corresponding mean ranks
else:
return sorted(genes_with_ranks, key=lambda x: x[1])[:k]
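# Illustrative sketch (assumed usage): pulling the 20 genes with the best mean stability
# rank for one drug; the (name, id) tuple and the expression DataFrame are placeholders.
#   top_genes = results.extract_topk_relevant_features_ss(
#       ("SomeDrug", 9999), k=20, gene_expression_df=gene_expression_df)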
def extract_feature_ranks_per_split_rforest(self, drug_tuple, split_seed):
# Extract the proper vector with importance coefficients
feat_coeffs = self.importances_dict[drug_tuple][split_seed]
# Create dictionary of the form gene_id: rank
ranks_dict = {}
for rank, feat_tuple in enumerate(sorted(feat_coeffs, key=lambda x: x[1], reverse=True)):
ranks_dict[feat_tuple[0]] = rank + 1
# Initialize final list with tuples (gene_id, rank), in the same order as in
# original list
ranks_list = []
# Iterate over feature names and add entries into ranks_list
for gene_id in [x[0] for x in feat_coeffs]:
ranks_list.append((gene_id, ranks_dict[gene_id]))
return ranks_list
def extract_feature_ranks_aggregated_over_splits_rforest(self, drug_tuple):
# Initialize array for storage of intermediate results
ranks_vector = np.zeros(17737)
# Iterate over data splits and add corresponding feature ranks
for split_seed in self.importances_dict[drug_tuple]:
features_with_rank = self.extract_feature_ranks_per_split_rforest(drug_tuple,
split_seed)
ranks_vector = ranks_vector + np.array([x[1] for x in features_with_rank])
# Divide ranks vector by number of splits to get mean ranks
ranks_vector = ranks_vector / len(self.importances_dict[drug_tuple])
# Add gene ids and return the results
gene_ids = [x[0] for x in features_with_rank]
return [x for x in zip(gene_ids, ranks_vector)]
def extract_topk_relevant_features_rforest(self, drug_tuple, k, just_genes_ids=True):
# Compute genes along with corresponding aggregated ranks over splits
genes_with_ranks = self.extract_feature_ranks_aggregated_over_splits_rforest(drug_tuple)
# If just_genes_ids is True, return only IDs corresponding to the k
# most relevant genes
if just_genes_ids:
return [x[0] for x in sorted(genes_with_ranks, key=lambda x: x[1])[:k]]
# Otherwise, return gene IDs along with corresponding mean ranks
else:
return sorted(genes_with_ranks, key=lambda x: x[1])[:k]
def reproduce_cv_results_vs_feat_numbers_ss(self):
pass
def reproduce_cv_results_vs_feat_numbers_rforest(self, drug_tuple, drug_annotations_df,
gene_expression_df, drug_response_df,
rforest_jobs=1, tuning_jobs=1, log=False):
# First, create corresponding DrugGenomeWide object
# Get appropriate drug attribute from drug annotations
row = drug_annotations_df[drug_annotations_df["DRUG_ID"] == drug_tuple[1]]
gdsc_id = row["DRUG_ID"].iloc[0]
name = row["DRUG_NAME"].iloc[0]
targets = row["TARGET"].iloc[0].split(", ")
target_pathway = row["TARGET_PATHWAY"].iloc[0]
drug = DrugGenomeWide(gdsc_id, name, targets, target_pathway)
print(drug)
print(drug.targets)
print(self.param_grid)
# Extract full data
data = drug.return_full_data(drug_response_df, gene_expression_df, data_combination=["expression"])
if log:
print("Reproducing results for drug {}, {}, data shape: {}.".format(
drug.name, drug_gdsc_id, data.shape))
# Extract features and labels
y = data["AUC"]
X = data.drop(["cell_line_id", "AUC"], axis=1)
assert X.shape[1] == 17737 and X.shape[0] == y.shape[0]
cv_results_over_splits = {}
# Iterate over data splits
c = 0
for seed_of_interest in self.performance_dict[drug_tuple]:
if log:
print()
print("Reproducing for split seed:", seed_of_interest)
# Split into train and test
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.3,
random_state=seed_of_interest)
# Importance vector
importances = np.array([x[1] for x in self.importances_dict[drug_tuple][seed_of_interest]])
indices = importances.argsort()[::-1]
# Initialize dictionary with cross-validation results over feature numbers k
cv_scores = {}
for k in self.best_params_dict[drug_tuple][seed_of_interest]:
# Extract reduced Xs
X_train_reduced = self.selectKImportance(X_train, indices, k)
X_test_reduced = self.selectKImportance(X_test, indices, k)
# Setup the model
scaler = StandardScaler()
estimator = RandomForestRegressor(random_state=self.estimator_seeds[c],
n_jobs=rforest_jobs)
pipe = Pipeline([
("scaler", scaler),
("estimator", estimator)
])
# Set best params
best_params = self.best_params_dict[drug_tuple][seed_of_interest][k]
pipe.set_params(**best_params)
# Get cross-val score
kfolds_results = model_selection.cross_val_score(pipe, X_train_reduced, y_train,
scoring = "neg_mean_squared_error", cv=self.kfolds,
n_jobs=tuning_jobs)
res = (-np.mean(kfolds_results)) ** 0.5
if log:
print("Done modeling with {} features".format(k))
cv_scores[k] = res
c += 1
cv_results_over_splits[seed_of_interest] = cv_scores
return cv_results_over_splits
@staticmethod
def comparative_df(experiments, flags, flag_name="Model",
columns_to_aggregate=["Model test RMSE", "Model test correlation",
"Dummy test RMSE", "Relative test RMSE"],
metric_to_aggregate="mean", drug_annotations_df=None,
remove_duplicates=True):
# Compute aggregated DF with results for every experiment
dfs = []
for i in range(len(experiments)):
df = experiments[i].create_agg_results_df(columns_to_aggregate, metric_to_aggregate,
drug_annotations_df, remove_duplicates)
dfs.append(df)
# Find the intersection in drugs
drugs_intersection = set(dfs[0]["Drug ID"])
for i in range(1, len(dfs)):
df = dfs[i]
drugs_intersection = drugs_intersection.intersection(df["Drug ID"])
drugs_intersection = list(drugs_intersection)
# Create a list of DataFrames with common drugs
dfs_common = [None] * len(experiments)
for i in range(len(dfs)):
df_common = dfs[i][dfs[i]["Drug ID"].isin(drugs_intersection)]
assert df_common.shape[0] == len(drugs_intersection)
# Add a column with appropriate flags
flags_column = [flags[i]] * df_common.shape[0]
df_common[flag_name] = flags_column
dfs_common[i] = df_common
# Concatenate all DataFrames
comparative_df = pd.concat(dfs_common, axis=0)
# Sort the final DF by drug ID
return comparative_df.sort_values("Drug ID")
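# Illustrative sketch (assumed usage): building one tidy DataFrame comparing two finished
# experiments; `rf_results` and `enet_results` are placeholders for ModelingResults objects.
#   comp_df = ModelingResults.comparative_df(
#       [rf_results, enet_results], flags=["Random Forest", "ElasticNet"],
#       drug_annotations_df=drug_annotations_df)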
@staticmethod
def barplot_from_comparative_df(data, x, y, hue=None, order=None, hue_order=None, title="",
figsize=(35, 12), title_size=45, label_size=30,
tick_size=20, width=0.7, grid=True, half=1,
hline_width=2., legend_fontsize=25, save_directory=None,
xticks_off=False, ax=None):
params = {"legend.fontsize": legend_fontsize,
"legend.handlelength": 2}
plt.rcParams.update(params)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
ax.set_title(title, fontsize=title_size)
ax.set_xlabel(x, fontsize=label_size)
ax.set_ylabel(y, fontsize=label_size)
sns.barplot(x, y, hue=hue,
data=data,
order=order,
hue_order=hue_order,
ax=ax)
plt.xticks(rotation='vertical')
plt.tick_params("both", labelsize=tick_size)
if xticks_off:
ax.set_xlabel("")
plt.xticks([], [])
plt.legend(title = "")
# If the metric is Relative test RMSE, add a horizontal line at 1,
# representing the baseline
if y == "Relative test RMSE":
ax.axhline(y = 1.0, linewidth=hline_width, color="black")
plt.tight_layout()
if save_directory:
plt.savefig(save_directory)
plt.show()
@staticmethod
def catplot_from_comparative_df(data, x, y, hue=None, order=None, hue_order=None, col=None, row=None,
kind="bar", title="",
height=5, aspect=2., title_size=45, label_size=35,
tick_size=25, width=0.7, grid=False, half=1,
hline_width=2., legend_fontsize=25, save_directory=None,
xticks_off=False, ax=None, legend=True, legend_out=False, marker_size=None):
params = {"legend.fontsize": legend_fontsize,
"legend.handlelength": 2}
plt.rcParams.update(params)
arguments = {"x": x, "y": y, "hue": hue, "data": data, "kind": kind,
"order":order, "hue_order": hue_order, "col":col, "row": row,
"height": height, "aspect": aspect, "ax": ax,
"legend_out": legend_out, "legend": legend}
if kind in ("bar", "violin", "box"):
g = sns.catplot(**arguments)
else:
g = sns.catplot(**arguments, s=marker_size)
plt.tick_params("both", labelsize=tick_size)
plt.xticks(rotation="vertical")
plt.xlabel(x, fontsize=label_size)
plt.ylabel(y, fontsize=label_size)
plt.title(title, fontsize=title_size)
if xticks_off:
ax.set_xlabel("")
plt.xticks([], [])
if grid:
plt.grid()
plt.legend(title = "")
# If the metric is Relative test RMSE, add a horizontal line at 1,
# representing the baseline
if y == "Relative test RMSE":
plt.axhline(y=1., color='b', linewidth=hline_width)
plt.tight_layout()
if save_directory:
plt.savefig(save_directory)
plt.close(g.fig)
plt.show()
@staticmethod
def sort_drugs_by_comparable_performance(comparative_df, model_to_compare, metric, flag_name="Model"):
results = []
for drug_name in comparative_df["Drug Name"].unique():
current_df = comparative_df[comparative_df["Drug Name"] == drug_name]
baseline_performance = current_df[current_df[flag_name] == model_to_compare][
metric
].iloc[0]
# Find the best model among rest
if metric == "Model test RMSE":
best_metric = 1.
for model in current_df[flag_name].unique():
if model != model_to_compare:
performance = current_df[current_df[flag_name] == model][metric].iloc[0]
if performance < best_metric:
best_metric = performance
else:
best_metric = 0.0
for model in current_df[flag_name].unique():
if model != model_to_compare:
performance = current_df[current_df[flag_name] == model][metric].iloc[0]
if performance > best_metric:
best_metric = performance
# Establish relative metric
relative_performance = baseline_performance / best_metric
# Add to results
results.append((drug_name, relative_performance))
# Sort the results
if metric == "Model test RMSE":
order = sorted(results, key=lambda x: x[1])
else:
order = sorted(results, key=lambda x: x[1], reverse=True)
return order
@staticmethod
def boxplot_from_comparative_df(data, x, y, hue=None, order=None, title="",
figsize=(8, 6), title_size=25, label_size=20,
tick_size=18, grid=True, rotation=-90, save_directory=None,
xticks_off=False):
# Actual plotting
fig = plt.figure(figsize=figsize)
plt.tick_params(labelsize=tick_size)
plt.title(title, fontsize = title_size)
plt.grid()
sns.boxplot(x, y, data=data, hue=hue, order=order)
plt.xlabel("", fontsize=label_size)
plt.ylabel(y, fontsize=label_size)
plt.xticks(rotation=rotation)
if xticks_off:
plt.xticks([], [])
plt.tight_layout()
if save_directory:
plt.savefig(save_directory)
plt.show()
@staticmethod
def filterout_good_drugs(data, rel_rmse_threshold=1.01, correlation_threshold=0.0, flag_name="Model"):
# Extract drug IDs for which one of the models exceeded threshold
good_drug_ids = []
for drug_id in data["Drug ID"].unique():
current_df = data[data["Drug ID"] == drug_id]
for model in current_df[flag_name]:
current_rel_rmse = current_df[current_df[flag_name] == model][
"Relative test RMSE"].iloc[0]
current_correlation = current_df[current_df[flag_name] == model][
"Model test correlation"].iloc[0]
if (current_rel_rmse > rel_rmse_threshold) and (current_correlation > correlation_threshold):
good_drug_ids.append(drug_id)
break
# Return DataFrame containing only well performing drugs
return data[data["Drug ID"].isin(good_drug_ids)]
@staticmethod
def get_best_results(overall_agg_dataframe, metric="Model test correlation"):
ides = []
names = []
corrs = []
rel_rmses = []
test_rmses = []
dummy_rmses = []
corr_pvals = []
experiment_names = []
pathways = []
# Iterate over drugs
for drug_id in overall_agg_dataframe["Drug ID"].unique():
df = overall_agg_dataframe[overall_agg_dataframe["Drug ID"] == drug_id]
# Find row corresponding the best metric
if metric == "Model test correlation" or metric == "Relative test RMSE":
max_idx = df[metric].idxmax()
max_row = df.loc[max_idx]
else:
max_idx = df[metric].idxmin()
max_row = df.loc[max_idx]
name = max_row["Drug Name"]
corr = max_row["Model test correlation"]
rel_rmse = max_row["Relative test RMSE"]
test_rmse = max_row["Model test RMSE"]
experiment = max_row["Model"]
pathway = max_row["Target Pathway"]
corr_pval = max_row["Correlation pval"]
dummy_rmse = max_row["Dummy test RMSE"]
ides.append(drug_id)
names.append(name)
corrs.append(corr)
rel_rmses.append(rel_rmse)
test_rmses.append(test_rmse)
experiment_names.append(experiment)
pathways.append(pathway)
corr_pvals.append(corr_pval)
dummy_rmses.append(dummy_rmse)
# Put into DataFrame
df_best_results = | pd.DataFrame() | pandas.DataFrame |
"""
Automatic dataset upload to google drive
<NAME> - <EMAIL>
"""
from logging import root
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from tqdm import tqdm
import pandas as pd
import queue
import sys
import os
from os.path import isfile, join
from time import perf_counter
import collection
import cv2
LOCAL_PATH_TO_TMP = "/Users/johnmarangola/Desktop/repos/cv-chess/core/vision/tmp/"
DATASET_METADATA_FILENAME = "my_csv.csv"
METADATA_FIELDS = ["File", "Piece Color", "Piece Type", "Position", "ID", "Batch ID"]
from concurrent.futures import ThreadPoolExecutor
from time import perf_counter
import preprocessing as pre
def get_id(drive, name):
"""
Get the ID of a file in Google Drive
Args:
drive (GoogleDrive object): Access to google drive
name (str): Filename
Returns:
str: Google drive file ID
"""
file_list = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
ids = []
for file1 in file_list:
if file1["title"] == name:
ids.append(file1["id"])
if len(ids) == 1: return ids[0]
return None
def download(drive, filename):
"""
Download a file from root directory of google drive
Args:
drive (GoogleDrive object): Access to google drive
filename (str): Filename to download
Returns:
bool: True if the file was found and downloaded, False otherwise
"""
_id = get_id(drive, filename)
if _id is None:
return False
temp = drive.CreateFile({'id':_id})
temp.GetContentFile(filename)
return True
def upload_as_child(drive, filename, folder_id):
"""
Upload a file to a parent folder
Args:
drive (GoogleDrive object): Access to Google Drive
filename (str): Name of file to be uploaded
folder_id (str): Parent folder drive ID
Returns:
GoogleDriveFile: Uploaded file
"""
image_file = drive.CreateFile({'parents': [{'id': folder_id}]})
image_file.SetContentFile(filename)
image_file.Upload()
return image_file
def create_root_folder(drive, name):
"""
Create a root folder in Google Drive
Args:
drive (GoogleDrive object): Access to google drive
name (str): Folder name
Returns:
str: Folder ID
"""
for file in drive.ListFile({'q': f"'root' in parents and trashed=false"}).GetList():
if file['title'] == name:
return None
root_folder = drive.CreateFile({'title':name, 'mimeType':"application/vnd.google-apps.folder"})
root_folder.Upload()
return root_folder['id']
def add_sub_directory(drive, parent_id, sub_dir):
"""
Add subfolder to parent directory
Args:
drive (GoogleDrive object): Access to google drive
parent_id (str): ID of parent directory
sub_dir (str): Name of subfolder
Returns:
str: ID of subfolder
"""
# check to make sure sub-directory does not exist yet:
for file in drive.ListFile({'q': f"'{parent_id}' in parents and trashed=false"}).GetList():
if file['title'] == sub_dir:
return False
sub_dir = drive.CreateFile({'title':sub_dir,"parents":[{'id':parent_id}],'mimeType':"application/vnd.google-apps.folder"})
sub_dir.Upload()
return sub_dir['id']
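# Illustrative sketch (assumed usage): building a dataset folder with an "images" subfolder.
# An authenticated GoogleDrive client is required; `authenticate()` is assumed to be provided
# elsewhere in the project and is not defined in this file; names are placeholders.
#   drive = authenticate()
#   root_id = create_root_folder(drive, "chess_dataset_v1")
#   images_id = add_sub_directory(drive, root_id, "images")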
def upload_local_dataset(drive, dataset_name, folder_id, local_path=LOCAL_PATH_TO_TMP, metadata_filename=DATASET_METADATA_FILENAME):
"""
Upload a local dataset to the Google Drive folder named dataset_name.
Args:
drive (GoogleDrive object): Access to google drive
dataset_name (str): Name of dataset to be uploaded to Google Drive.
folder_id (str): Google drive ID of folder that the dataset is uploaded within.
local_path (str, optional): Local absolute path of cv-chess/core/vision/tmp/. Defaults to LOCAL_PATH_TO_TMP.
metadata_filename (str, optional): Name of metadata file (includes .csv). Defaults to DATASET_METADATA_FILENAME.
Returns:
bool: True if the upload completed, False if the local metadata file could not be read.
"""
# Read in local metadata
os.chdir(local_path)
try:
local_meta = pd.read_csv(metadata_filename)
except:
print(f"Unable to load {metadata_filename} from {LOCAL_PATH_TO_TMP}. Exiting...")
return False
# Walk through directory, finding valid files to upload
im_upload = []
for file in os.listdir(local_path):
if file.endswith(".jpg") and file[0] == "f":
im_upload.append(file)
# initialize empty queue
#q = queue.Queue()
t1 = perf_counter() # Start runtime clock
# Concurrently execute file uploads using 100 workers for the thread pool
with ThreadPoolExecutor(max_workers=50) as executor:
for file in tqdm (im_upload, desc="Threading upload", ascii=False, ncols=100):
executor.submit(push_to_drive_as_child, drive, local_meta, file, folder_id)
# Dequeue drive ids, adding each to metadata as it is popped from the queue
#while not q.empty():
# _row, _id = q.get()
# local_meta.at[_row, "ID"] = _id
t1 -= perf_counter()
# Clean up dataframe from auto-add during copying and writing operations
#for col in local_meta.columns.tolist():
# Remove any column that is not an essential metadata field
# if col not in METADATA_FIELDS:
# del local_meta[col]
local_meta.to_csv(path_or_buf=local_path + metadata_filename)
# Upload metadata to google drive
upload_as_child(drive, metadata_filename, folder_id)
print(f"Total upload time: {abs(t1)}s")
def upload_new_dataset(dataset_name, local_path=LOCAL_PATH_TO_TMP, metadata_filename=DATASET_METADATA_FILENAME):
"""
Upload a new dataset to folder in Google Drive
Args:
dataset_name (str): Name of new dataset folder
local_path (str, optional): Local absolute path of cv-chess/core/vision/tmp/. Defaults to LOCAL_PATH_TO_TMP.
Returns:
boolean: True if dataset successfully uploaded, False otherwise.
"""
drive = authenticate()
if get_id(drive, dataset_name) is not None:
print(f"Dataset {dataset_name} already exists. Exiting...")
return False
root_id = create_root_folder(drive, dataset_name)
if root_id is None:
print("Error.")
return False
# Upload the dataset from local to Drive
return upload_local_dataset(drive, dataset_name, root_id, local_path=local_path, metadata_filename=metadata_filename)
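# Illustrative sketch (assumed usage): pushing the images and metadata currently sitting in
# the local tmp/ directory to a brand new Drive folder; the dataset name is a placeholder.
#   upload_new_dataset("chess_dataset_v1")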
def add_to_existing_dataset(dataset_name, local_path=LOCAL_PATH_TO_TMP, cloud_metadata_filename=DATASET_METADATA_FILENAME):
drive = authenticate()
folder_id = get_id(drive, dataset_name)
# Check to ensure that the dataset folder exists in Google Drive
if folder_id is None:
print(f"Dataset {dataset_name} not found")
return False
folder_id_string = "\'" + folder_id + "\'" + " in parents and trashed=false"
file_list = drive.ListFile({'q': folder_id_string}).GetList()
metadata_id = None
# Iterate through dataset directory, searching for metadata filename
for file in file_list:
if file['title'] == cloud_metadata_filename:
metadata_id = file['id']
metadata_file = drive.CreateFile({'id':metadata_id})
metadata_file.GetContentFile(cloud_metadata_filename)
break
# Exit if could not find metadata .csv
if metadata_id is None:
print("Metadata .csv not found. Exiting...")
sys.exit()
cloud_metadata_df = pd.read_csv(cloud_metadata_filename)
os.chdir(local_path)
try:
local_meta = | pd.read_csv(cloud_metadata_filename) | pandas.read_csv |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, | Timestamp('2008-10-23 05:53:11') | pandas.Timestamp |
import os
import sys
import json
import logging
from datetime import datetime, timedelta
import tweepy
import numpy as np
import pandas as pd
from months import MONTHS_DICT
DATA_SOURCE = "https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/vaccinations.csv"
BAR_CHARS = 16
# People over 12 years old (according to INE projected data for 2021)
# NOTE: This number is still below the 75% threshold for herd immunity
# It represents only ~71% of the population
TOTAL_POP = 17109746
VAX_POP = 12293144
def logging_setup():
logging.basicConfig(filename='logs/bot.log', level=logging.INFO, format='%(message)s')
return
def should_tweet(df):
final_line = None
with open("logs/bot.log", "r") as log_file:
for line in log_file:
final_line = line
pass
if final_line is None:
return True
final_date = datetime.strptime(final_line[:10], '%Y-%m-%d')
print(final_date, df['date'].values[-1])
return (df.tail(1)['date'] > final_date).any()
# From https://github.com/imbstt/impf-progress-bot/blob/master/bot.py
def generate_bar(percentage, n_chars = None):
if not n_chars:
n_chars = BAR_CHARS
num_filled = round(percentage*n_chars)
num_empty = n_chars-num_filled
display_percentage = str(round(percentage*100, 1))
msg = '{}{} {}%'.format('▓'*num_filled, '░'*num_empty, display_percentage)
return msg
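# Illustrative sketch: with the default 16 bar characters, 62.5% coverage renders as
#   generate_bar(0.625)  ->  '▓▓▓▓▓▓▓▓▓▓░░░░░░ 62.5%'
# (10 filled and 6 empty blocks; the percentage is rounded to one decimal place).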
def get_data():
df = | pd.read_csv(DATA_SOURCE) | pandas.read_csv |
"""
Tasks
-------
Search and transform jsonable structures, specifically to make it 'easy' to make tabular/csv output for other consumers.
Example
~~~~~~~~~~~~~
*give me a list of all the fields called 'id' in this stupid, gnarly
thing*
>>> Q('id',gnarly_data)
['id1','id2','id3']
Observations:
---------------------
1) 'simple data structures' exist and are common. They are tedious
to search.
2) The DOM is another nested / treeish structure, and jQuery selector is
a good tool for that.
3a) R, Numpy, Excel and other analysis tools want 'tabular' data. These
analyses are valuable and worth doing.
3b) Dot/Graphviz, NetworkX, and some other analyses *like* treeish/dicty
things, and those analyses are also worth doing!
3c) Some analyses are best done using 'one-off' and custom code in C, Python,
or another 'real' programming language.
4) Arbitrary transforms are tedious and error prone. SQL is one solution,
XSLT is another,
5) the XPATH/XML/XSLT family is.... not universally loved :) They are
very complete, and the completeness can make simple cases... gross.
6) For really complicated data structures, we can write one-off code. Getting
80% of the way is mostly okay. There will always have to be programmers
in the loop.
7) Re-inventing SQL is probably a failure mode. So is reinventing XPATH, XSLT
and the like. Be wary of mission creep! Re-use when possible (e.g., can
we put the thing into a DOM using
8) If the interface is good, people can improve performance later.
Simplifying
---------------
1) Assuming 'jsonable' structures
2) keys are strings or stringlike. Python allows any hashable to be a key.
for now, we pretend that doesn't happen.
3) assumes most dicts are 'well behaved'. DAG, no cycles!
4) assume that if people want really specialized transforms, they can do it
themselves.
"""
from __future__ import print_function
from collections import namedtuple
import csv
import itertools
from itertools import product
from operator import attrgetter as aget, itemgetter as iget
import operator
import sys
from pandas.compat import map, u, callable, Counter
import pandas.compat as compat
## note 'url' appears multiple places and not all extensions have same struct
ex1 = {
'name': 'Gregg',
'extensions': [
{'id':'hello',
'url':'url1'},
{'id':'gbye',
'url':'url2',
'more': dict(url='url3')},
]
}
## much longer example
ex2 = {u('metadata'): {u('accessibilities'): [{u('name'): u('accessibility.tabfocus'),
u('value'): 7},
{u('name'): u('accessibility.mouse_focuses_formcontrol'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret'), u('value'): False},
{u('name'): u('accessibility.win32.force_disabled'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.startlinksonly'), u('value'): False},
{u('name'): u('accessibility.usebrailledisplay'), u('value'): u('')},
{u('name'): u('accessibility.typeaheadfind.timeout'), u('value'): 5000},
{u('name'): u('accessibility.typeaheadfind.enabletimeout'), u('value'): True},
{u('name'): u('accessibility.tabfocus_applies_to_xul'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.flashBar'), u('value'): 1},
{u('name'): u('accessibility.typeaheadfind.autostart'), u('value'): True},
{u('name'): u('accessibility.blockautorefresh'), u('value'): False},
{u('name'): u('accessibility.browsewithcaret_shortcut.enabled'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.enablesound'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.prefillwithselection'),
u('value'): True},
{u('name'): u('accessibility.typeaheadfind.soundURL'), u('value'): u('beep')},
{u('name'): u('accessibility.typeaheadfind'), u('value'): False},
{u('name'): u('accessibility.typeaheadfind.casesensitive'), u('value'): 0},
{u('name'): u('accessibility.warn_on_browsewithcaret'), u('value'): True},
{u('name'): u('accessibility.usetexttospeech'), u('value'): u('')},
{u('name'): u('accessibility.accesskeycausesactivation'), u('value'): True},
{u('name'): u('accessibility.typeaheadfind.linksonly'), u('value'): False},
{u('name'): u('isInstantiated'), u('value'): True}],
u('extensions'): [{u('id'): u('216ee7f7f4a5b8175374cd62150664efe2433a31'),
u('isEnabled'): True},
{u('id'): u('1aa53d3b720800c43c4ced5740a6e82bb0b3813e'), u('isEnabled'): False},
{u('id'): u('01ecfac5a7bd8c9e27b7c5499e71c2d285084b37'), u('isEnabled'): True},
{u('id'): u('1c01f5b22371b70b312ace94785f7b0b87c3dfb2'), u('isEnabled'): True},
{u('id'): u('fb723781a2385055f7d024788b75e959ad8ea8c3'), u('isEnabled'): True}],
u('fxVersion'): u('9.0'),
u('location'): u('zh-CN'),
u('operatingSystem'): u('WINNT Windows NT 5.1'),
u('surveyAnswers'): u(''),
u('task_guid'): u('d69fbd15-2517-45b5-8a17-bb7354122a75'),
u('tpVersion'): u('1.2'),
u('updateChannel'): u('beta')},
u('survey_data'): {
u('extensions'): [{u('appDisabled'): False,
u('id'): u('testpilot?labs.mozilla.com'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): u('Test Pilot')},
{u('appDisabled'): True,
u('id'): u('dict?www.youdao.com'),
u('isCompatible'): False,
u('isEnabled'): False,
u('isPlatformCompatible'): True,
u('name'): u('Youdao Word Capturer')},
{u('appDisabled'): False,
u('id'): u('jqs?sun.com'),
u('isCompatible'): True,
u('isEnabled'): True,
u('isPlatformCompatible'): True,
u('name'): | u('Java Quick Starter') | pandas.compat.u |
"""Automated data download and IO."""
# Builtins
import glob
import os
import gzip
import bz2
import hashlib
import shutil
import zipfile
import sys
import math
import logging
from functools import partial, wraps
import time
import fnmatch
import urllib.request
import urllib.error
from urllib.parse import urlparse
import socket
import multiprocessing
from netrc import netrc
import ftplib
import ssl
import tarfile
# External libs
import pandas as pd
import numpy as np
import shapely.geometry as shpg
import requests
# Optional libs
try:
import geopandas as gpd
except ImportError:
pass
try:
import salem
from salem import wgs84
except ImportError:
pass
try:
import rasterio
try:
# rasterio V > 1.0
from rasterio.merge import merge as merge_tool
except ImportError:
from rasterio.tools.merge import merge as merge_tool
except ImportError:
pass
try:
ModuleNotFoundError
except NameError:
ModuleNotFoundError = ImportError
# Locals
import oggm.cfg as cfg
from oggm.exceptions import (InvalidParamsError, NoInternetException,
DownloadVerificationFailedException,
DownloadCredentialsMissingException,
HttpDownloadError, HttpContentTooShortError,
InvalidDEMError, FTPSDownloadError)
# Module logger
logger = logging.getLogger('.'.join(__name__.split('.')[:-1]))
# Github repository and commit hash/branch name/tag name on that repository
# The given commit will be downloaded from github and used as source for
# all sample data
SAMPLE_DATA_GH_REPO = 'OGGM/oggm-sample-data'
SAMPLE_DATA_COMMIT = '98f6e299ab60b04cba9eb3be382231e19baf8c9e'
GDIR_L1L2_URL = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
'L1-L2_files/centerlines/')
GDIR_L3L5_URL = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
'L3-L5_files/CRU/centerlines/qc3/pcp2.5/no_match/')
DEMS_GDIR_URL = ('https://cluster.klima.uni-bremen.de/~oggm/gdirs/oggm_v1.4/'
'rgitopo/')
CHECKSUM_URL = 'https://cluster.klima.uni-bremen.de/data/downloads.sha256.hdf'
CHECKSUM_VALIDATION_URL = CHECKSUM_URL + '.sha256'
CHECKSUM_LIFETIME = 24 * 60 * 60
# Web mercator proj constants
WEB_N_PIX = 256
WEB_EARTH_RADUIS = 6378137.
DEM_SOURCES = ['GIMP', 'ARCTICDEM', 'RAMP', 'TANDEM', 'AW3D30', 'MAPZEN',
'DEM3', 'ASTER', 'SRTM', 'REMA', 'ALASKA', 'COPDEM', 'NASADEM']
DEM_SOURCES_PER_GLACIER = None
_RGI_METADATA = dict()
DEM3REG = {
'ISL': [-25., -13., 63., 67.], # Iceland
'SVALBARD': [9., 35.99, 75., 84.],
'JANMAYEN': [-10., -7., 70., 72.],
'FJ': [36., 68., 79., 90.], # Franz Josef Land
'FAR': [-8., -6., 61., 63.], # Faroer
'BEAR': [18., 20., 74., 75.], # Bear Island
'SHL': [-3., 0., 60., 61.], # Shetland
# Antarctica tiles as UTM zones, large files
'01-15': [-180., -91., -90, -60.],
'16-30': [-91., -1., -90., -60.],
'31-45': [-1., 89., -90., -60.],
'46-60': [89., 189., -90., -60.],
# Greenland tiles
'GL-North': [-72., -11., 76., 84.],
'GL-West': [-62., -42., 64., 76.],
'GL-South': [-52., -40., 59., 64.],
'GL-East': [-42., -17., 64., 76.]
}
# Function
tuple2int = partial(np.array, dtype=np.int64)
lock = None
def mkdir(path, reset=False):
"""Checks if directory exists and if not, create one.
Parameters
----------
    reset: erase the content of the directory if it exists
Returns
-------
the path
"""
if reset and os.path.exists(path):
shutil.rmtree(path)
try:
os.makedirs(path)
except FileExistsError:
pass
return path
def del_empty_dirs(s_dir):
"""Delete empty directories."""
b_empty = True
for s_target in os.listdir(s_dir):
s_path = os.path.join(s_dir, s_target)
if os.path.isdir(s_path):
if not del_empty_dirs(s_path):
b_empty = False
else:
b_empty = False
if b_empty:
os.rmdir(s_dir)
return b_empty
def findfiles(root_dir, endswith):
"""Finds all files with a specific ending in a directory
Parameters
----------
root_dir : str
        The directory to search for
endswith : str
        The file ending (e.g. '.hgt')
Returns
-------
the list of files
"""
out = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for filename in [f for f in filenames if f.endswith(endswith)]:
out.append(os.path.join(dirpath, filename))
return out
def get_lock():
"""Get multiprocessing lock."""
global lock
if lock is None:
# Global Lock
if cfg.PARAMS.get('use_mp_spawn', False):
lock = multiprocessing.get_context('spawn').Lock()
else:
lock = multiprocessing.Lock()
return lock
def get_dl_verify_data(section):
"""Returns a pandas DataFrame with all known download object hashes.
    The returned DataFrame is indexed by str cache_obj_name (without the
    section prefix) and stores int(size) and bytes(sha256) for each file.
"""
verify_key = 'dl_verify_data_' + section
if cfg.DATA.get(verify_key) is not None:
return cfg.DATA[verify_key]
verify_file_path = os.path.join(cfg.CACHE_DIR, 'downloads.sha256.hdf')
def verify_file(force=False):
"""Check the hash file's own hash"""
if not cfg.PARAMS['has_internet']:
return
if not force and os.path.isfile(verify_file_path) and \
os.path.getmtime(verify_file_path) + CHECKSUM_LIFETIME > time.time():
return
logger.info('Checking the download verification file checksum...')
try:
with requests.get(CHECKSUM_VALIDATION_URL) as req:
req.raise_for_status()
verify_file_sha256 = req.text.split(maxsplit=1)[0]
verify_file_sha256 = bytearray.fromhex(verify_file_sha256)
except Exception as e:
verify_file_sha256 = None
logger.warning('Failed getting verification checksum: ' + repr(e))
if os.path.isfile(verify_file_path) and verify_file_sha256:
sha256 = hashlib.sha256()
with open(verify_file_path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
if sha256.digest() != verify_file_sha256:
logger.warning('%s changed or invalid, deleting.'
% (verify_file_path))
os.remove(verify_file_path)
else:
os.utime(verify_file_path)
if not np.any(['dl_verify_data_' in k for k in cfg.DATA.keys()]):
# We check the hash file only once per session
# no need to do it at each call
verify_file()
if not os.path.isfile(verify_file_path):
if not cfg.PARAMS['has_internet']:
return pd.DataFrame()
logger.info('Downloading %s to %s...'
% (CHECKSUM_URL, verify_file_path))
with requests.get(CHECKSUM_URL, stream=True) as req:
if req.status_code == 200:
mkdir(os.path.dirname(verify_file_path))
with open(verify_file_path, 'wb') as f:
for b in req.iter_content(chunk_size=0xFFFF):
if b:
f.write(b)
logger.info('Done downloading.')
verify_file(force=True)
if not os.path.isfile(verify_file_path):
logger.warning('Downloading and verifying checksums failed.')
return pd.DataFrame()
try:
data = pd.read_hdf(verify_file_path, key=section)
except KeyError:
data = pd.DataFrame()
cfg.DATA[verify_key] = data
return data
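# A minimal consumption sketch (the section name and path below are
# hypothetical; real keys come from the downloaded checksum HDF file). The
# frame is indexed by cache path and carries 'size' and 'sha256' entries,
# exactly as used further down in _verified_download_helper:
#
#   >>> df = get_dl_verify_data('cluster.klima.uni-bremen.de')
#   >>> entry = df.loc['data/dems/some_tile.zip']   # hypothetical key
#   >>> int(entry['size']), bytes(entry['sha256'])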
def _call_dl_func(dl_func, cache_path):
"""Helper so the actual call to downloads can be overridden
"""
return dl_func(cache_path)
def _cached_download_helper(cache_obj_name, dl_func, reset=False):
"""Helper function for downloads.
Takes care of checking if the file is already cached.
Only calls the actual download function when no cached version exists.
"""
cache_dir = cfg.PATHS['dl_cache_dir']
cache_ro = cfg.PARAMS['dl_cache_readonly']
# A lot of logic below could be simplified but it's also not too important
wd = cfg.PATHS.get('working_dir')
if wd:
# this is for real runs
fb_cache_dir = os.path.join(wd, 'cache')
check_fb_dir = False
else:
        # Nothing has been set up yet, this is bad - find a place to write
# This should happen on read-only cluster only but still
wd = os.environ.get('OGGM_WORKDIR')
if wd is not None and os.path.isdir(wd):
fb_cache_dir = os.path.join(wd, 'cache')
else:
fb_cache_dir = os.path.join(cfg.CACHE_DIR, 'cache')
check_fb_dir = True
if not cache_dir:
# Defaults to working directory: it must be set!
if not cfg.PATHS['working_dir']:
raise InvalidParamsError("Need a valid PATHS['working_dir']!")
cache_dir = fb_cache_dir
cache_ro = False
fb_path = os.path.join(fb_cache_dir, cache_obj_name)
if not reset and os.path.isfile(fb_path):
return fb_path
cache_path = os.path.join(cache_dir, cache_obj_name)
if not reset and os.path.isfile(cache_path):
return cache_path
if cache_ro:
if check_fb_dir:
# Add a manual check that we are caching sample data download
if 'oggm-sample-data' not in fb_path:
raise InvalidParamsError('Attempting to download something '
'with invalid global settings.')
cache_path = fb_path
if not cfg.PARAMS['has_internet']:
raise NoInternetException("Download required, but "
"`has_internet` is False.")
mkdir(os.path.dirname(cache_path))
try:
cache_path = _call_dl_func(dl_func, cache_path)
except BaseException:
if os.path.exists(cache_path):
os.remove(cache_path)
raise
return cache_path
def _verified_download_helper(cache_obj_name, dl_func, reset=False):
"""Helper function for downloads.
Verifies the size and hash of the downloaded file against the included
list of known static files.
Uses _cached_download_helper to perform the actual download.
"""
path = _cached_download_helper(cache_obj_name, dl_func, reset)
try:
dl_verify = cfg.PARAMS['dl_verify']
except KeyError:
dl_verify = True
if dl_verify and path and cache_obj_name not in cfg.DL_VERIFIED:
cache_section, cache_path = cache_obj_name.split('/', 1)
data = get_dl_verify_data(cache_section)
if cache_path not in data.index:
logger.info('No known hash for %s' % cache_obj_name)
cfg.DL_VERIFIED[cache_obj_name] = True
else:
# compute the hash
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for b in iter(lambda: f.read(0xFFFF), b''):
sha256.update(b)
sha256 = sha256.digest()
size = os.path.getsize(path)
# check
data = data.loc[cache_path]
if data['size'] != size or bytes(data['sha256']) != sha256:
err = '%s failed to verify!\nis: %s %s\nexpected: %s %s' % (
path, size, sha256.hex(), data[0], data[1].hex())
raise DownloadVerificationFailedException(msg=err, path=path)
logger.info('%s verified successfully.' % path)
cfg.DL_VERIFIED[cache_obj_name] = True
return path
def _requests_urlretrieve(url, path, reporthook, auth=None, timeout=None):
"""Implements the required features of urlretrieve on top of requests
"""
chunk_size = 128 * 1024
chunk_count = 0
with requests.get(url, stream=True, auth=auth, timeout=timeout) as r:
if r.status_code != 200:
raise HttpDownloadError(r.status_code, url)
r.raise_for_status()
size = r.headers.get('content-length') or -1
size = int(size)
if reporthook:
reporthook(chunk_count, chunk_size, size)
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_size):
if not chunk:
continue
f.write(chunk)
chunk_count += 1
if reporthook:
reporthook(chunk_count, chunk_size, size)
if chunk_count * chunk_size < size:
raise HttpContentTooShortError()
def _classic_urlretrieve(url, path, reporthook, auth=None, timeout=None):
"""Thin wrapper around pythons urllib urlretrieve
"""
ourl = url
if auth:
u = urlparse(url)
if '@' not in u.netloc:
netloc = auth[0] + ':' + auth[1] + '@' + u.netloc
url = u._replace(netloc=netloc).geturl()
old_def_timeout = socket.getdefaulttimeout()
if timeout is not None:
socket.setdefaulttimeout(timeout)
try:
urllib.request.urlretrieve(url, path, reporthook)
except urllib.error.HTTPError as e:
raise HttpDownloadError(e.code, ourl)
except urllib.error.ContentTooShortError as e:
raise HttpContentTooShortError()
finally:
socket.setdefaulttimeout(old_def_timeout)
class ImplicitFTPTLS(ftplib.FTP_TLS):
""" FTP_TLS subclass that automatically wraps sockets in SSL to support
implicit FTPS.
Taken from https://stackoverflow.com/a/36049814
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._sock = None
@property
def sock(self):
"""Return the socket."""
return self._sock
@sock.setter
def sock(self, value):
"""When modifying the socket, ensure that it is ssl wrapped."""
if value is not None and not isinstance(value, ssl.SSLSocket):
value = self.context.wrap_socket(value)
self._sock = value
def url_exists(url):
"""Checks if a given a URL exists or not."""
request = requests.get(url)
return request.status_code < 400
def _ftps_retrieve(url, path, reporthook, auth=None, timeout=None):
""" Wrapper around ftplib to download from FTPS server
"""
if not auth:
raise DownloadCredentialsMissingException('No authentication '
'credentials given!')
upar = urlparse(url)
# Decide if Implicit or Explicit FTPS is used based on the port in url
if upar.port == 990:
ftps = ImplicitFTPTLS()
elif upar.port == 21:
ftps = ftplib.FTP_TLS()
try:
# establish ssl connection
ftps.connect(host=upar.hostname, port=upar.port, timeout=timeout)
ftps.login(user=auth[0], passwd=auth[1])
ftps.prot_p()
logger.info('Established connection %s' % upar.hostname)
# meta for progress bar size
count = 0
total = ftps.size(upar.path)
bs = 12*1024
def _ftps_progress(data):
outfile.write(data)
nonlocal count
count += 1
reporthook(count, count*bs, total)
with open(path, 'wb') as outfile:
ftps.retrbinary('RETR ' + upar.path, _ftps_progress, blocksize=bs)
except (ftplib.error_perm, socket.timeout, socket.gaierror) as err:
raise FTPSDownloadError(err)
finally:
ftps.close()
def _get_url_cache_name(url):
"""Returns the cache name for any given url.
"""
res = urlparse(url)
return res.netloc.split(':', 1)[0] + res.path
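# For illustration (worked out by hand from the urlparse logic above): the
# port is stripped and the path is kept, e.g.
#
#   >>> _get_url_cache_name('https://example.org:443/data/file.zip')
#   'example.org/data/file.zip'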
def oggm_urlretrieve(url, cache_obj_name=None, reset=False,
reporthook=None, auth=None, timeout=None):
"""Wrapper around urlretrieve, to implement our caching logic.
    Instead of accepting a destination path, it decides where to store the file
and returns the local path.
auth is expected to be either a tuple of ('username', 'password') or None.
"""
if cache_obj_name is None:
cache_obj_name = _get_url_cache_name(url)
def _dlf(cache_path):
logger.info("Downloading %s to %s..." % (url, cache_path))
try:
_requests_urlretrieve(url, cache_path, reporthook, auth, timeout)
except requests.exceptions.InvalidSchema:
if 'ftps://' in url:
_ftps_retrieve(url, cache_path, reporthook, auth, timeout)
else:
_classic_urlretrieve(url, cache_path, reporthook, auth,
timeout)
return cache_path
return _verified_download_helper(cache_obj_name, _dlf, reset)
def _progress_urlretrieve(url, cache_name=None, reset=False,
auth=None, timeout=None):
"""Downloads a file, returns its local path, and shows a progressbar."""
try:
from progressbar import DataTransferBar, UnknownLength
pbar = None
def _upd(count, size, total):
nonlocal pbar
if pbar is None:
pbar = DataTransferBar()
if not pbar.is_terminal:
pbar.min_poll_interval = 15
if pbar.max_value is None:
if total > 0:
pbar.start(total)
else:
pbar.start(UnknownLength)
pbar.update(min(count * size, total))
sys.stdout.flush()
res = oggm_urlretrieve(url, cache_obj_name=cache_name, reset=reset,
reporthook=_upd, auth=auth, timeout=timeout)
try:
pbar.finish()
except BaseException:
pass
return res
except (ImportError, ModuleNotFoundError):
return oggm_urlretrieve(url, cache_obj_name=cache_name,
reset=reset, auth=auth, timeout=timeout)
def aws_file_download(aws_path, cache_name=None, reset=False):
with get_lock():
return _aws_file_download_unlocked(aws_path, cache_name, reset)
def _aws_file_download_unlocked(aws_path, cache_name=None, reset=False):
"""Download a file from the AWS drive s3://astgtmv2/
    **Note:** fresh downloads from AWS are no longer supported; this can only
    return files that are already present in the local cache.
Parameters
----------
aws_path: path relative to s3://astgtmv2/
"""
while aws_path.startswith('/'):
aws_path = aws_path[1:]
if cache_name is not None:
cache_obj_name = cache_name
else:
cache_obj_name = 'astgtmv2/' + aws_path
def _dlf(cache_path):
raise NotImplementedError("Downloads from AWS are no longer supported")
return _verified_download_helper(cache_obj_name, _dlf, reset)
def file_downloader(www_path, retry_max=5, cache_name=None,
reset=False, auth=None, timeout=None):
"""A slightly better downloader: it tries more than once."""
local_path = None
retry_counter = 0
while retry_counter <= retry_max:
# Try to download
try:
retry_counter += 1
local_path = _progress_urlretrieve(www_path, cache_name=cache_name,
reset=reset, auth=auth,
timeout=timeout)
# if no error, exit
break
except HttpDownloadError as err:
# This works well for py3
if err.code == 404 or err.code == 300:
# Ok so this *should* be an ocean tile
return None
elif err.code >= 500 and err.code < 600:
logger.info("Downloading %s failed with HTTP error %s, "
"retrying in 10 seconds... %s/%s" %
(www_path, err.code, retry_counter, retry_max))
time.sleep(10)
continue
else:
raise
except HttpContentTooShortError as err:
logger.info("Downloading %s failed with ContentTooShortError"
" error %s, retrying in 10 seconds... %s/%s" %
(www_path, err.code, retry_counter, retry_max))
time.sleep(10)
continue
except DownloadVerificationFailedException as err:
if (cfg.PATHS['dl_cache_dir'] and
err.path.startswith(cfg.PATHS['dl_cache_dir']) and
cfg.PARAMS['dl_cache_readonly']):
if not cache_name:
cache_name = _get_url_cache_name(www_path)
cache_name = "GLOBAL_CACHE_INVALID/" + cache_name
retry_counter -= 1
logger.info("Global cache for %s is invalid!")
else:
try:
os.remove(err.path)
except FileNotFoundError:
pass
logger.info("Downloading %s failed with "
"DownloadVerificationFailedException\n %s\n"
"The file might have changed or is corrupted. "
"File deleted. Re-downloading... %s/%s" %
(www_path, err.msg, retry_counter, retry_max))
continue
except requests.ConnectionError as err:
if err.args[0].__class__.__name__ == 'MaxRetryError':
# if request tried often enough we don't have to do this
# this error does happen for not existing ASTERv3 files
return None
else:
# in other cases: try again
logger.info("Downloading %s failed with ConnectionError, "
"retrying in 10 seconds... %s/%s" %
(www_path, retry_counter, retry_max))
time.sleep(10)
continue
except FTPSDownloadError as err:
logger.info("Downloading %s failed with FTPSDownloadError"
" error: '%s', retrying in 10 seconds... %s/%s" %
(www_path, err.orgerr, retry_counter, retry_max))
time.sleep(10)
continue
# See if we managed (fail is allowed)
if not local_path or not os.path.exists(local_path):
logger.warning('Downloading %s failed.' % www_path)
return local_path
def locked_func(func):
"""To decorate a function that needs to be locked for multiprocessing"""
@wraps(func)
def wrapper(*args, **kwargs):
with get_lock():
return func(*args, **kwargs)
return wrapper
def file_extractor(file_path):
"""For archives with only one file inside extract the file to tmpdir."""
filename, file_extension = os.path.splitext(file_path)
# Second one for tar.gz files
f2, ex2 = os.path.splitext(filename)
if ex2 == '.tar':
filename, file_extension = f2, '.tar.gz'
bname = os.path.basename(file_path)
# This is to give a unique name to the tmp file
hid = hashlib.md5(file_path.encode()).hexdigest()[:7] + '_'
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# Check output extension
def _check_ext(f):
_, of_ext = os.path.splitext(f)
if of_ext not in ['.nc', '.tif']:
raise InvalidParamsError('Extracted file extension not recognized'
': {}'.format(of_ext))
return of_ext
if file_extension == '.zip':
with zipfile.ZipFile(file_path) as zf:
members = zf.namelist()
if len(members) != 1:
raise RuntimeError('Cannot extract multiple files')
o_name = hid + members[0]
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with open(o_path, 'wb') as f:
f.write(zf.read(members[0]))
elif file_extension == '.gz':
# Gzip files cannot be inspected. It's always only one file
# Decide on its name
o_name = hid + os.path.basename(filename)
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with gzip.GzipFile(file_path) as zf:
with open(o_path, 'wb') as outfile:
for line in zf:
outfile.write(line)
elif file_extension == '.bz2':
# bzip2 files cannot be inspected. It's always only one file
# Decide on its name
o_name = hid + os.path.basename(filename)
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with bz2.open(file_path) as zf:
with open(o_path, 'wb') as outfile:
for line in zf:
outfile.write(line)
elif file_extension in ['.tar.gz', '.tar']:
with tarfile.open(file_path) as zf:
members = zf.getmembers()
if len(members) != 1:
raise RuntimeError('Cannot extract multiple files')
o_name = hid + members[0].name
o_path = os.path.join(tmpdir, o_name)
of_ext = _check_ext(o_path)
if not os.path.exists(o_path):
logger.info('Extracting {} to {}...'.format(bname, o_path))
with open(o_path, 'wb') as f:
f.write(zf.extractfile(members[0]).read())
else:
raise InvalidParamsError('Extension not recognized: '
'{}'.format(file_extension))
# Be sure we don't overfill the folder
cfg.get_lru_handler(tmpdir, ending=of_ext).append(o_path)
return o_path
def download_with_authentication(wwwfile, key):
""" Uses credentials from a local .netrc file to download files
    This function is currently used for TanDEM-X and ASTER
Parameters
----------
wwwfile : str
path to the file to download
key : str
        the machine to look up in the .netrc file
Returns
-------
"""
    # Check the cache first. Use a dummy download function to make sure
    # nothing is downloaded without credentials:
def _always_none(foo):
return None
cache_obj_name = _get_url_cache_name(wwwfile)
dest_file = _verified_download_helper(cache_obj_name, _always_none)
# Grab auth parameters
if not dest_file:
authfile = os.path.expanduser('~/.netrc')
if not os.path.isfile(authfile):
raise DownloadCredentialsMissingException(
(authfile, ' does not exist. Add necessary credentials for ',
                 key, ' with `oggm_netrc_credentials`. You may have to ',
'register at the respective service first.'))
try:
netrc(authfile).authenticators(key)[0]
except TypeError:
raise DownloadCredentialsMissingException(
('Credentials for ', key, ' are not in ', authfile, '. Add ',
                 'credentials for it with `oggm_netrc_credentials`.'))
dest_file = file_downloader(
wwwfile, auth=(netrc(authfile).authenticators(key)[0],
netrc(authfile).authenticators(key)[2]))
return dest_file
def download_oggm_files():
with get_lock():
return _download_oggm_files_unlocked()
def _download_oggm_files_unlocked():
"""Checks if the demo data is already on the cache and downloads it."""
zip_url = 'https://github.com/%s/archive/%s.zip' % \
(SAMPLE_DATA_GH_REPO, SAMPLE_DATA_COMMIT)
odir = os.path.join(cfg.CACHE_DIR)
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT)
# download only if necessary
if not os.path.exists(sdir):
ofile = file_downloader(zip_url)
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
assert os.path.isdir(sdir)
# list of files for output
out = dict()
for root, directories, filenames in os.walk(sdir):
for filename in filenames:
if filename in out:
# This was a stupid thing, and should not happen
# TODO: duplicates in sample data...
k = os.path.join(os.path.basename(root), filename)
assert k not in out
out[k] = os.path.join(root, filename)
else:
out[filename] = os.path.join(root, filename)
return out
def _download_srtm_file(zone):
with get_lock():
return _download_srtm_file_unlocked(zone)
def _download_srtm_file_unlocked(zone):
"""Checks if the srtm data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, 'srtm_' + zone + '.tif')
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# Did we download it yet?
wwwfile = ('http://srtm.csi.cgiar.org/wp-content/uploads/files/srtm_5x5/'
'TIFF/srtm_' + zone + '.zip')
dest_file = file_downloader(wwwfile)
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_nasadem_file(zone):
with get_lock():
return _download_nasadem_file_unlocked(zone)
def _download_nasadem_file_unlocked(zone):
"""Checks if the NASADEM data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
wwwfile = ('https://e4ftl01.cr.usgs.gov/MEASURES/NASADEM_HGT.001/'
'2000.02.11/NASADEM_HGT_{}.zip'.format(zone))
demfile = '{}.hgt'.format(zone)
outpath = os.path.join(tmpdir, demfile)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# Did we download it yet?
dest_file = file_downloader(wwwfile)
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extract(demfile, path=tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_tandem_file(zone):
with get_lock():
return _download_tandem_file_unlocked(zone)
def _download_tandem_file_unlocked(zone):
"""Checks if the tandem data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
bname = zone.split('/')[-1] + '_DEM.tif'
wwwfile = ('https://download.geoservice.dlr.de/TDM90/files/'
'{}.zip'.format(zone))
outpath = os.path.join(tmpdir, bname)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
dest_file = download_with_authentication(wwwfile, 'geoservice.dlr.de')
# That means we tried hard but we couldn't find it
if not dest_file:
return None
elif not zipfile.is_zipfile(dest_file):
        # If the TanDEM-X tile does not exist, an invalid file is created.
# See https://github.com/OGGM/oggm/issues/893 for more details
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
for fn in zf.namelist():
if 'DEM/' + bname in fn:
break
with open(outpath, 'wb') as fo:
fo.write(zf.read(fn))
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_dem3_viewpano(zone):
with get_lock():
return _download_dem3_viewpano_unlocked(zone)
def _download_dem3_viewpano_unlocked(zone):
"""Checks if the DEM3 data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, zone + '.tif')
extract_dir = os.path.join(tmpdir, 'tmp_' + zone)
mkdir(extract_dir, reset=True)
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# OK, so see if downloaded already
# some files have a newer version 'v2'
if zone in ['R33', 'R34', 'R35', 'R36', 'R37', 'R38', 'Q32', 'Q33', 'Q34',
'Q35', 'Q36', 'Q37', 'Q38', 'Q39', 'Q40', 'P31', 'P32', 'P33',
'P34', 'P35', 'P36', 'P37', 'P38', 'P39', 'P40']:
ifile = 'http://viewfinderpanoramas.org/dem3/' + zone + 'v2.zip'
elif zone in DEM3REG.keys():
# We prepared these files as tif already
ifile = ('https://cluster.klima.uni-bremen.de/~oggm/dem/'
'DEM3_MERGED/{}.tif'.format(zone))
return file_downloader(ifile)
else:
ifile = 'http://viewfinderpanoramas.org/dem3/' + zone + '.zip'
dfile = file_downloader(ifile)
# None means we tried hard but we couldn't find it
if not dfile:
return None
# ok we have to extract it
with zipfile.ZipFile(dfile) as zf:
zf.extractall(extract_dir)
# Serious issue: sometimes, if a southern hemisphere URL is queried for
# download and there is none, a NH zip file is downloaded.
# Example: http://viewfinderpanoramas.org/dem3/SN29.zip yields N29!
# BUT: There are southern hemisphere files that download properly. However,
# the unzipped folder has the file name of
# the northern hemisphere file. Some checks if correct file exists:
if len(zone) == 4 and zone.startswith('S'):
zonedir = os.path.join(extract_dir, zone[1:])
else:
zonedir = os.path.join(extract_dir, zone)
globlist = glob.glob(os.path.join(zonedir, '*.hgt'))
# take care of the special file naming cases
if zone in DEM3REG.keys():
globlist = glob.glob(os.path.join(extract_dir, '*', '*.hgt'))
if not globlist:
# Final resort
globlist = (findfiles(extract_dir, '.hgt') or
findfiles(extract_dir, '.HGT'))
if not globlist:
raise RuntimeError("We should have some files here, but we don't")
    # merge the single HGT files (can be a bit inefficient, because not every
# single file might be exactly within extent...)
rfiles = [rasterio.open(s) for s in globlist]
dest, output_transform = merge_tool(rfiles)
profile = rfiles[0].profile
if 'affine' in profile:
profile.pop('affine')
profile['transform'] = output_transform
profile['height'] = dest.shape[1]
profile['width'] = dest.shape[2]
profile['driver'] = 'GTiff'
with rasterio.open(outpath, 'w', **profile) as dst:
dst.write(dest)
for rf in rfiles:
rf.close()
# delete original files to spare disk space
for s in globlist:
os.remove(s)
del_empty_dirs(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_aster_file(zone):
with get_lock():
return _download_aster_file_unlocked(zone)
def _download_aster_file_unlocked(zone):
"""Checks if the ASTER data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
wwwfile = ('https://e4ftl01.cr.usgs.gov/ASTER_B/ASTT/ASTGTM.003/'
'2000.03.01/{}.zip'.format(zone))
outpath = os.path.join(tmpdir, zone + '_dem.tif')
# check if extracted file exists already
if os.path.exists(outpath):
return outpath
# download from NASA Earthdata with credentials
dest_file = download_with_authentication(wwwfile, 'urs.earthdata.nasa.gov')
# That means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(outpath):
with zipfile.ZipFile(dest_file) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_topo_file_from_cluster(fname):
with get_lock():
return _download_topo_file_from_cluster_unlocked(fname)
def _download_topo_file_from_cluster_unlocked(fname):
"""Checks if the special topo data is in the directory and if not,
download it from the cluster.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
outpath = os.path.join(tmpdir, fname)
url = 'https://cluster.klima.uni-bremen.de/data/dems/'
url += fname + '.zip'
dfile = file_downloader(url)
if not os.path.exists(outpath):
logger.info('Extracting ' + fname + '.zip to ' + outpath + '...')
with zipfile.ZipFile(dfile) as zf:
zf.extractall(tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(outpath)
cfg.get_lru_handler(tmpdir).append(outpath)
return outpath
def _download_copdem_file(cppfile, tilename):
with get_lock():
return _download_copdem_file_unlocked(cppfile, tilename)
def _download_copdem_file_unlocked(cppfile, tilename):
"""Checks if Copernicus DEM file is in the directory, if not download it.
cppfile : name of the tarfile to download
tilename : name of folder and tif file within the cppfile
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# tarfiles are extracted in directories per each tile
fpath = '{0}_DEM.tif'.format(tilename)
demfile = os.path.join(tmpdir, fpath)
# check if extracted file exists already
if os.path.exists(demfile):
return demfile
# Did we download it yet?
ftpfile = ('ftps://cdsdata.copernicus.eu:990/' +
'datasets/COP-DEM_GLO-90-DGED/2019_1/' +
cppfile)
dest_file = download_with_authentication(ftpfile,
'spacedata.copernicus.eu')
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(demfile):
tiffile = os.path.join(tilename, 'DEM', fpath)
with tarfile.open(dest_file) as tf:
tmember = tf.getmember(tiffile)
# do not extract the full path of the file
tmember.name = os.path.basename(tf.getmember(tiffile).name)
tf.extract(tmember, tmpdir)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(demfile)
cfg.get_lru_handler(tmpdir).append(demfile)
return demfile
def _download_aw3d30_file(zone):
with get_lock():
return _download_aw3d30_file_unlocked(zone)
def _download_aw3d30_file_unlocked(fullzone):
"""Checks if the AW3D30 data is in the directory and if not, download it.
"""
# extract directory
tmpdir = cfg.PATHS['tmp_dir']
mkdir(tmpdir)
# tarfiles are extracted in directories per each tile
tile = fullzone.split('/')[1]
demfile = os.path.join(tmpdir, tile, tile + '_AVE_DSM.tif')
# check if extracted file exists already
if os.path.exists(demfile):
return demfile
# Did we download it yet?
ftpfile = ('ftp://ftp.eorc.jaxa.jp/pub/ALOS/ext1/AW3D30/release_v1804/'
+ fullzone + '.tar.gz')
try:
dest_file = file_downloader(ftpfile, timeout=180)
except urllib.error.URLError:
# This error is raised if file is not available, could be water
return None
# None means we tried hard but we couldn't find it
if not dest_file:
return None
# ok we have to extract it
if not os.path.exists(demfile):
from oggm.utils import robust_tar_extract
dempath = os.path.dirname(demfile)
robust_tar_extract(dest_file, dempath)
# See if we're good, don't overfill the tmp directory
assert os.path.exists(demfile)
# this tarfile contains several files
for file in os.listdir(dempath):
cfg.get_lru_handler(tmpdir).append(os.path.join(dempath, file))
return demfile
def _download_mapzen_file(zone):
with get_lock():
return _download_mapzen_file_unlocked(zone)
def _download_mapzen_file_unlocked(zone):
"""Checks if the mapzen data is in the directory and if not, download it.
"""
bucket = 'elevation-tiles-prod'
prefix = 'geotiff'
url = 'http://s3.amazonaws.com/%s/%s/%s' % (bucket, prefix, zone)
# That's all
return file_downloader(url, timeout=180)
def get_prepro_gdir(rgi_version, rgi_id, border, prepro_level, base_url=None):
with get_lock():
return _get_prepro_gdir_unlocked(rgi_version, rgi_id, border,
prepro_level, base_url=base_url)
def get_prepro_base_url(base_url=None, rgi_version=None, border=None,
prepro_level=None):
"""Extended base url where to find the desired gdirs."""
if base_url is None:
if prepro_level <= 2:
base_url = GDIR_L1L2_URL
else:
base_url = GDIR_L3L5_URL
if not base_url.endswith('/'):
base_url += '/'
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
if border is None:
border = cfg.PARAMS['border']
url = base_url
url += 'RGI{}/'.format(rgi_version)
url += 'b_{:03d}/'.format(int(border))
url += 'L{:d}/'.format(prepro_level)
return url
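# Example (hand-derived from the string concatenation above; the server URL is
# made up):
#
#   >>> get_prepro_base_url(base_url='https://server/gdirs/', rgi_version='62',
#   ...                     border=80, prepro_level=3)
#   'https://server/gdirs/RGI62/b_080/L3/'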
def _get_prepro_gdir_unlocked(rgi_version, rgi_id, border, prepro_level,
base_url=None):
url = get_prepro_base_url(rgi_version=rgi_version, border=border,
prepro_level=prepro_level, base_url=base_url)
url += '{}/{}.tar' .format(rgi_id[:8], rgi_id[:11])
tar_base = file_downloader(url)
if tar_base is None:
raise RuntimeError('Could not find file at ' + url)
return tar_base
def get_geodetic_mb_dataframe(file_path=None):
"""Fetches the reference geodetic dataframe for calibration.
Currently that's the data from Hughonnet et al 2021, corrected for
outliers and with void filled. The data preparation script is
available at
https://nbviewer.jupyter.org/urls/cluster.klima.uni-bremen.de/~oggm/geodetic_ref_mb/convert.ipynb
Parameters
----------
file_path : str
in case you have your own file to parse (check the format first!)
Returns
-------
a DataFrame with the data.
"""
# fetch the file online or read custom file
if file_path is None:
base_url = 'https://cluster.klima.uni-bremen.de/~oggm/geodetic_ref_mb/'
file_name = 'hugonnet_2021_ds_rgi60_pergla_rates_10_20_worldwide_filled.hdf'
file_path = file_downloader(base_url + file_name)
# Did we open it yet?
if file_path in cfg.DATA:
return cfg.DATA[file_path]
# If not let's go
extension = os.path.splitext(file_path)[1]
if extension == '.csv':
df = pd.read_csv(file_path, index_col=0)
elif extension == '.hdf':
df = pd.read_hdf(file_path)
# Check for missing data (old files)
if len(df.loc[df['dmdtda'].isnull()]) > 0:
raise InvalidParamsError('The reference file you are using has missing '
'data and is probably outdated (sorry for '
'that). Delete the file at '
f'{file_path} and start again.')
cfg.DATA[file_path] = df
return df
def srtm_zone(lon_ex, lat_ex):
"""Returns a list of SRTM zones covering the desired extent.
"""
# SRTM are sorted in tiles of 5 degrees
srtm_x0 = -180.
srtm_y0 = 60.
srtm_dx = 5.
srtm_dy = -5.
# quick n dirty solution to be sure that we will cover the whole range
mi, ma = np.min(lon_ex), np.max(lon_ex)
# int() to avoid Deprec warning:
lon_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) + 3)))
mi, ma = np.min(lat_ex), np.max(lat_ex)
# int() to avoid Deprec warning
lat_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) + 3)))
zones = []
for lon in lon_ex:
for lat in lat_ex:
dx = lon - srtm_x0
dy = lat - srtm_y0
assert dy < 0
zx = np.ceil(dx / srtm_dx)
zy = np.ceil(dy / srtm_dy)
zones.append('{:02.0f}_{:02.0f}'.format(zx, zy))
return list(sorted(set(zones)))
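# Worked example (values computed by hand from the arithmetic above): a small
# box around 10.2E / 46.2N falls into a single 5x5 degree tile,
#
#   >>> srtm_zone([10.1, 10.4], [46.1, 46.4])
#   ['39_03']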
def _tandem_path(lon_tile, lat_tile):
# OK we have a proper tile now
# First folder level is sorted from S to N
level_0 = 'S' if lat_tile < 0 else 'N'
level_0 += '{:02d}'.format(abs(lat_tile))
# Second folder level is sorted from W to E, but in 10 steps
level_1 = 'W' if lon_tile < 0 else 'E'
level_1 += '{:03d}'.format(divmod(abs(lon_tile), 10)[0] * 10)
    # The level-2 formatting depends on lat
level_2 = 'W' if lon_tile < 0 else 'E'
if abs(lat_tile) <= 60:
level_2 += '{:03d}'.format(abs(lon_tile))
elif abs(lat_tile) <= 80:
level_2 += '{:03d}'.format(divmod(abs(lon_tile), 2)[0] * 2)
else:
level_2 += '{:03d}'.format(divmod(abs(lon_tile), 4)[0] * 4)
# Final path
out = (level_0 + '/' + level_1 + '/' +
'TDM1_DEM__30_{}{}'.format(level_0, level_2))
return out
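# Worked example (hand-derived from the formatting rules above):
#
#   >>> _tandem_path(10, 46)
#   'N46/E010/TDM1_DEM__30_N46E010'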
def tandem_zone(lon_ex, lat_ex):
"""Returns a list of TanDEM-X zones covering the desired extent.
"""
# Files are one by one tiles, so lets loop over them
# For higher lats they are stored in steps of 2 and 4. My code below
# is probably giving more files than needed but better safe than sorry
lat_tiles = np.arange(np.floor(lat_ex[0]), np.ceil(lat_ex[1]+1e-9),
dtype=int)
zones = []
for lat in lat_tiles:
if abs(lat) < 60:
l0 = np.floor(lon_ex[0])
l1 = np.floor(lon_ex[1])
elif abs(lat) < 80:
l0 = divmod(lon_ex[0], 2)[0] * 2
l1 = divmod(lon_ex[1], 2)[0] * 2
elif abs(lat) < 90:
l0 = divmod(lon_ex[0], 4)[0] * 4
l1 = divmod(lon_ex[1], 4)[0] * 4
lon_tiles = np.arange(l0, l1+1, dtype=int)
for lon in lon_tiles:
zones.append(_tandem_path(lon, lat))
return list(sorted(set(zones)))
def _aw3d30_path(lon_tile, lat_tile):
# OK we have a proper tile now
# Folders are sorted with N E S W in 5 degree steps
# But in N and E the lower boundary is indicated
# e.g. N060 contains N060 - N064
# e.g. E000 contains E000 - E004
# but S and W indicate the upper boundary:
# e.g. S010 contains S006 - S010
# e.g. W095 contains W091 - W095
# get letters
ns = 'S' if lat_tile < 0 else 'N'
ew = 'W' if lon_tile < 0 else 'E'
# get lat/lon
lon = abs(5 * np.floor(lon_tile/5))
lat = abs(5 * np.floor(lat_tile/5))
folder = '%s%.3d%s%.3d' % (ns, lat, ew, lon)
filename = '%s%.3d%s%.3d' % (ns, abs(lat_tile), ew, abs(lon_tile))
# Final path
out = folder + '/' + filename
return out
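# Worked example (hand-derived from the folder/file naming rules above):
#
#   >>> _aw3d30_path(10, 46)
#   'N045E010/N046E010'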
def aw3d30_zone(lon_ex, lat_ex):
"""Returns a list of AW3D30 zones covering the desired extent.
"""
# Files are one by one tiles, so lets loop over them
lon_tiles = np.arange(np.floor(lon_ex[0]), np.ceil(lon_ex[1]+1e-9),
dtype=int)
lat_tiles = np.arange(np.floor(lat_ex[0]), np.ceil(lat_ex[1]+1e-9),
dtype=int)
zones = []
for lon in lon_tiles:
for lat in lat_tiles:
zones.append(_aw3d30_path(lon, lat))
return list(sorted(set(zones)))
def _extent_to_polygon(lon_ex, lat_ex, to_crs=None):
if lon_ex[0] == lon_ex[1] and lat_ex[0] == lat_ex[1]:
out = shpg.Point(lon_ex[0], lat_ex[0])
else:
x = [lon_ex[0], lon_ex[1], lon_ex[1], lon_ex[0], lon_ex[0]]
y = [lat_ex[0], lat_ex[0], lat_ex[1], lat_ex[1], lat_ex[0]]
out = shpg.Polygon(np.array((x, y)).T)
if to_crs is not None:
out = salem.transform_geometry(out, to_crs=to_crs)
return out
def arcticdem_zone(lon_ex, lat_ex):
"""Returns a list of Arctic-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('ArcticDEM_Tile_Index_Rel7_by_tile.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def rema_zone(lon_ex, lat_ex):
"""Returns a list of REMA-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('REMA_Tile_Index_Rel1.1.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def alaska_dem_zone(lon_ex, lat_ex):
"""Returns a list of Alaska-DEM zones covering the desired extent.
"""
gdf = gpd.read_file(get_demo_file('Alaska_albers_V3_tiles.shp'))
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
return gdf.tile.values if len(gdf) > 0 else []
def copdem_zone(lon_ex, lat_ex):
"""Returns a list of Copernicus DEM tarfile and tilename tuples
"""
# path to the lookup shapefiles
gdf = gpd.read_file(get_demo_file('RGI60_COPDEM_lookup.shp'))
# intersect with lat lon extents
p = _extent_to_polygon(lon_ex, lat_ex, to_crs=gdf.crs)
gdf = gdf.loc[gdf.intersects(p)]
# COPDEM is global, if we miss all tiles it is worth an error
if len(gdf) == 0:
raise InvalidDEMError('Could not find any Copernicus DEM tile.')
flist = []
for _, g in gdf.iterrows():
cpp = g['CPP File']
eop = g['Eop Id']
eop = eop.split(':')[-2]
assert 'Copernicus' in eop
flist.append((cpp, eop))
return flist
def dem3_viewpano_zone(lon_ex, lat_ex):
"""Returns a list of DEM3 zones covering the desired extent.
http://viewfinderpanoramas.org/Coverage%20map%20viewfinderpanoramas_org3.htm
"""
for _f in DEM3REG.keys():
if (np.min(lon_ex) >= DEM3REG[_f][0]) and \
(np.max(lon_ex) <= DEM3REG[_f][1]) and \
(np.min(lat_ex) >= DEM3REG[_f][2]) and \
(np.max(lat_ex) <= DEM3REG[_f][3]):
# test some weird inset files in Antarctica
if (np.min(lon_ex) >= -91.) and (np.max(lon_ex) <= -90.) and \
(np.min(lat_ex) >= -72.) and (np.max(lat_ex) <= -68.):
return ['SR15']
elif (np.min(lon_ex) >= -47.) and (np.max(lon_ex) <= -43.) and \
(np.min(lat_ex) >= -61.) and (np.max(lat_ex) <= -60.):
return ['SP23']
elif (np.min(lon_ex) >= 162.) and (np.max(lon_ex) <= 165.) and \
(np.min(lat_ex) >= -68.) and (np.max(lat_ex) <= -66.):
return ['SQ58']
# test some rogue Greenland tiles as well
elif (np.min(lon_ex) >= -72.) and (np.max(lon_ex) <= -66.) and \
(np.min(lat_ex) >= 76.) and (np.max(lat_ex) <= 80.):
return ['T19']
elif (np.min(lon_ex) >= -72.) and (np.max(lon_ex) <= -66.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U19']
elif (np.min(lon_ex) >= -66.) and (np.max(lon_ex) <= -60.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U20']
elif (np.min(lon_ex) >= -60.) and (np.max(lon_ex) <= -54.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U21']
elif (np.min(lon_ex) >= -54.) and (np.max(lon_ex) <= -48.) and \
(np.min(lat_ex) >= 80.) and (np.max(lat_ex) <= 83.):
return ['U22']
elif (np.min(lon_ex) >= -25.) and (np.max(lon_ex) <= -13.) and \
(np.min(lat_ex) >= 63.) and (np.max(lat_ex) <= 67.):
return ['ISL']
else:
return [_f]
# if the tile doesn't have a special name, its name can be found like this:
# corrected SRTMs are sorted in tiles of 6 deg longitude and 4 deg latitude
srtm_x0 = -180.
srtm_y0 = 0.
srtm_dx = 6.
srtm_dy = 4.
# quick n dirty solution to be sure that we will cover the whole range
mi, ma = np.min(lon_ex), np.max(lon_ex)
# TODO: Fabien, find out what Johannes wanted with this +3
    # the +3 just adds a few extra sample points to be on the safe side
# int() to avoid Deprec warning
lon_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) / srtm_dy) + 3))
mi, ma = np.min(lat_ex), np.max(lat_ex)
# int() to avoid Deprec warning
lat_ex = np.linspace(mi, ma, int(np.ceil((ma - mi) / srtm_dx) + 3))
zones = []
for lon in lon_ex:
for lat in lat_ex:
dx = lon - srtm_x0
dy = lat - srtm_y0
zx = np.ceil(dx / srtm_dx)
# convert number to letter
zy = chr(int(abs(dy / srtm_dy)) + ord('A'))
if lat >= 0:
zones.append('%s%02.0f' % (zy, zx))
else:
zones.append('S%s%02.0f' % (zy, zx))
return list(sorted(set(zones)))
def aster_zone(lon_ex, lat_ex):
"""Returns a list of ASTGTMV3 zones covering the desired extent.
ASTER v3 tiles are 1 degree x 1 degree
N50 contains 50 to 50.9
E10 contains 10 to 10.9
S70 contains -69.99 to -69.0
W20 contains -19.99 to -19.0
"""
# adding small buffer for unlikely case where one lon/lat_ex == xx.0
lons = np.arange(np.floor(lon_ex[0]-1e-9), np.ceil(lon_ex[1]+1e-9))
lats = np.arange(np.floor(lat_ex[0]-1e-9), np.ceil(lat_ex[1]+1e-9))
zones = []
for lat in lats:
# north or south?
ns = 'S' if lat < 0 else 'N'
for lon in lons:
# east or west?
ew = 'W' if lon < 0 else 'E'
filename = 'ASTGTMV003_{}{:02.0f}{}{:03.0f}'.format(ns, abs(lat),
ew, abs(lon))
zones.append(filename)
return list(sorted(set(zones)))
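# Worked example (hand-derived from the tiling rules above): a box inside the
# 10-11E / 46-47N cell maps to a single tile name,
#
#   >>> aster_zone([10.1, 10.4], [46.1, 46.4])
#   ['ASTGTMV003_N46E010']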
def nasadem_zone(lon_ex, lat_ex):
"""Returns a list of NASADEM zones covering the desired extent.
NASADEM tiles are 1 degree x 1 degree
N50 contains 50 to 50.9
E10 contains 10 to 10.9
S70 contains -69.99 to -69.0
W20 contains -19.99 to -19.0
"""
# adding small buffer for unlikely case where one lon/lat_ex == xx.0
lons = np.arange(np.floor(lon_ex[0]-1e-9), np.ceil(lon_ex[1]+1e-9))
lats = np.arange(np.floor(lat_ex[0]-1e-9), np.ceil(lat_ex[1]+1e-9))
zones = []
for lat in lats:
# north or south?
ns = 's' if lat < 0 else 'n'
for lon in lons:
# east or west?
ew = 'w' if lon < 0 else 'e'
filename = '{}{:02.0f}{}{:03.0f}'.format(ns, abs(lat), ew,
abs(lon))
zones.append(filename)
return list(sorted(set(zones)))
def mapzen_zone(lon_ex, lat_ex, dx_meter=None, zoom=None):
"""Returns a list of AWS mapzen zones covering the desired extent.
For mapzen one has to specify the level of detail (zoom) one wants. The
best way in OGGM is to specify dx_meter of the underlying map and OGGM
will decide which zoom level works best.
"""
if dx_meter is None and zoom is None:
raise InvalidParamsError('Need either zoom level or dx_meter.')
bottom, top = lat_ex
left, right = lon_ex
ybound = 85.0511
if bottom <= -ybound:
bottom = -ybound
if top <= -ybound:
top = -ybound
if bottom > ybound:
bottom = ybound
if top > ybound:
top = ybound
if right >= 180:
right = 179.999
if left >= 180:
left = 179.999
if dx_meter:
# Find out the zoom so that we are close to the desired accuracy
lat = np.max(np.abs([bottom, top]))
zoom = int(np.ceil(math.log2((math.cos(lat * math.pi / 180) *
2 * math.pi * WEB_EARTH_RADUIS) /
(WEB_N_PIX * dx_meter))))
# According to this we should just always stay above 10 (sorry)
# https://github.com/tilezen/joerd/blob/master/docs/data-sources.md
zoom = 10 if zoom < 10 else zoom
# Code from planetutils
size = 2 ** zoom
xt = lambda x: int((x + 180.0) / 360.0 * size)
yt = lambda y: int((1.0 - math.log(math.tan(math.radians(y)) +
(1 / math.cos(math.radians(y))))
/ math.pi) / 2.0 * size)
tiles = []
for x in range(xt(left), xt(right) + 1):
for y in range(yt(top), yt(bottom) + 1):
tiles.append('/'.join(map(str, [zoom, x, str(y) + '.tif'])))
return tiles
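# Rough zoom-level illustration (numbers worked out by hand from the formula
# above, not from running OGGM): for dx_meter=100 at ~46 deg latitude,
#
#   zoom = ceil(log2(cos(46 deg) * 2 * pi * 6378137 / (256 * 100))) ~= 11
#
# i.e. asking for ~100 m pixels selects zoom level 11 tiles (the code never
# goes below zoom 10).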
def get_demo_file(fname):
"""Returns the path to the desired OGGM-sample-file.
If Sample data is not cached it will be downloaded from
https://github.com/OGGM/oggm-sample-data
Parameters
----------
fname : str
Filename of the desired OGGM-sample-file
Returns
-------
str
Absolute path to the desired file.
"""
d = download_oggm_files()
if fname in d:
return d[fname]
else:
return None
def get_wgms_files():
"""Get the path to the default WGMS-RGI link file and the data dir.
Returns
-------
(file, dir) : paths to the files
"""
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'wgms')
datadir = os.path.join(sdir, 'mbdata')
assert os.path.exists(datadir)
outf = os.path.join(sdir, 'rgi_wgms_links_20200415.csv')
outf = pd.read_csv(outf, dtype={'RGI_REG': object})
return outf, datadir
def get_glathida_file():
"""Get the path to the default GlaThiDa-RGI link file.
Returns
-------
file : paths to the file
"""
# Roll our own
download_oggm_files()
sdir = os.path.join(cfg.CACHE_DIR,
'oggm-sample-data-%s' % SAMPLE_DATA_COMMIT,
'glathida')
outf = os.path.join(sdir, 'rgi_glathida_links.csv')
assert os.path.exists(outf)
return outf
def get_rgi_dir(version=None, reset=False):
"""Path to the RGI directory.
If the RGI files are not present, download them.
Parameters
----------
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the RGI directory first and downloads the data
Returns
-------
str
path to the RGI directory
"""
with get_lock():
return _get_rgi_dir_unlocked(version=version, reset=reset)
def _get_rgi_dir_unlocked(version=None, reset=False):
rgi_dir = cfg.PATHS['rgi_dir']
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
# Be sure the user gave a sensible path to the RGI dir
if not rgi_dir:
        raise InvalidParamsError('The RGI data directory has to be '
'specified explicitly.')
rgi_dir = os.path.abspath(os.path.expanduser(rgi_dir))
rgi_dir = os.path.join(rgi_dir, 'RGIV' + version)
mkdir(rgi_dir, reset=reset)
if version == '50':
dfile = 'http://www.glims.org/RGI/rgi50_files/rgi50.zip'
elif version == '60':
dfile = 'http://www.glims.org/RGI/rgi60_files/00_rgi60.zip'
elif version == '61':
dfile = 'https://cluster.klima.uni-bremen.de/data/rgi/rgi_61.zip'
elif version == '62':
dfile = 'https://cluster.klima.uni-bremen.de/~oggm/rgi/rgi62.zip'
test_file = os.path.join(rgi_dir,
'*_rgi*{}_manifest.txt'.format(version))
if len(glob.glob(test_file)) == 0:
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(rgi_dir)
# Extract subdirs
pattern = '*_rgi{}_*.zip'.format(version)
for root, dirs, files in os.walk(cfg.PATHS['rgi_dir']):
for filename in fnmatch.filter(files, pattern):
zfile = os.path.join(root, filename)
with zipfile.ZipFile(zfile) as zf:
ex_root = zfile.replace('.zip', '')
mkdir(ex_root)
zf.extractall(ex_root)
# delete the zipfile after success
os.remove(zfile)
if len(glob.glob(test_file)) == 0:
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + rgi_dir)
return rgi_dir
def get_rgi_region_file(region, version=None, reset=False):
"""Path to the RGI region file.
If the RGI files are not present, download them.
Parameters
----------
region : str
from '01' to '19'
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the RGI directory first and downloads the data
Returns
-------
str
path to the RGI shapefile
"""
rgi_dir = get_rgi_dir(version=version, reset=reset)
f = list(glob.glob(rgi_dir + "/*/{}_*.shp".format(region)))
assert len(f) == 1
return f[0]
def get_rgi_glacier_entities(rgi_ids, version=None):
"""Get a list of glacier outlines selected from their RGI IDs.
Will download RGI data if not present.
Parameters
----------
rgi_ids : list of str
the glaciers you want the outlines for
version : str
the rgi version
Returns
-------
geopandas.GeoDataFrame
containing the desired RGI glacier outlines
"""
regions = [s.split('-')[1].split('.')[0] for s in rgi_ids]
if version is None:
version = rgi_ids[0].split('-')[0][-2:]
selection = []
for reg in sorted(np.unique(regions)):
sh = gpd.read_file(get_rgi_region_file(reg, version=version))
selection.append(sh.loc[sh.RGIId.isin(rgi_ids)])
# Make a new dataframe of those
selection = pd.concat(selection)
selection.crs = sh.crs # for geolocalisation
if len(selection) != len(rgi_ids):
raise RuntimeError('Could not find all RGI ids')
return selection
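# A typical call (illustrative only; this will trigger an RGI download the
# first time it is used):
#
#   >>> gdf = get_rgi_glacier_entities(['RGI60-11.00897'])
#   >>> len(gdf) == 1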
def get_rgi_intersects_dir(version=None, reset=False):
"""Path to the RGI directory containing the intersect files.
If the files are not present, download them.
Parameters
----------
version : str
'5', '6', defaults to None (linking to the one specified in cfg.PARAMS)
reset : bool
If True, deletes the intersects before redownloading them
Returns
-------
str
path to the directory
"""
with get_lock():
return _get_rgi_intersects_dir_unlocked(version=version, reset=reset)
def _get_rgi_intersects_dir_unlocked(version=None, reset=False):
rgi_dir = cfg.PATHS['rgi_dir']
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
# Be sure the user gave a sensible path to the RGI dir
if not rgi_dir:
        raise InvalidParamsError('The RGI data directory has to be '
'specified explicitly.')
rgi_dir = os.path.abspath(os.path.expanduser(rgi_dir))
mkdir(rgi_dir)
dfile = 'https://cluster.klima.uni-bremen.de/data/rgi/'
dfile += 'RGI_V{}_Intersects.zip'.format(version)
if version == '62':
dfile = ('https://cluster.klima.uni-bremen.de/~oggm/rgi/'
'rgi62_Intersects.zip')
odir = os.path.join(rgi_dir, 'RGI_V' + version + '_Intersects')
if reset and os.path.exists(odir):
shutil.rmtree(odir)
# A lot of code for backwards compat (sigh...)
if version in ['50', '60']:
test_file = os.path.join(odir, 'Intersects_OGGM_Manifest.txt')
if not os.path.exists(test_file):
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
if not os.path.exists(test_file):
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + odir)
else:
        test_file = os.path.join(odir, '*ntersect*anifest.txt')
if len(glob.glob(test_file)) == 0:
# if not there download it
ofile = file_downloader(dfile, reset=reset)
# Extract root
with zipfile.ZipFile(ofile) as zf:
zf.extractall(odir)
# Extract subdirs
pattern = '*_rgi{}_*.zip'.format(version)
for root, dirs, files in os.walk(cfg.PATHS['rgi_dir']):
for filename in fnmatch.filter(files, pattern):
zfile = os.path.join(root, filename)
with zipfile.ZipFile(zfile) as zf:
ex_root = zfile.replace('.zip', '')
mkdir(ex_root)
zf.extractall(ex_root)
# delete the zipfile after success
os.remove(zfile)
if len(glob.glob(test_file)) == 0:
raise RuntimeError('Could not find a manifest file in the RGI '
'directory: ' + odir)
return odir
def get_rgi_intersects_region_file(region=None, version=None, reset=False):
"""Path to the RGI regional intersect file.
If the RGI files are not present, download them.
Parameters
----------
region : str
from '00' to '19', with '00' being the global file (deprecated).
From RGI version '61' onwards, please use `get_rgi_intersects_entities`
with a list of glaciers instead of relying to the global file.
version : str
        '5', '6', '61'... defaults to the one specified in cfg.PARAMS
reset : bool
If True, deletes the intersect file before redownloading it
Returns
-------
str
path to the RGI intersects shapefile
"""
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
rgi_dir = get_rgi_intersects_dir(version=version, reset=reset)
if region == '00':
if version in ['50', '60']:
version = 'AllRegs'
region = '*'
else:
raise InvalidParamsError("From RGI version 61 onwards, please use "
"get_rgi_intersects_entities() instead.")
f = list(glob.glob(os.path.join(rgi_dir, "*", '*intersects*' + region +
'_rgi*' + version + '*.shp')))
assert len(f) == 1
return f[0]
def get_rgi_intersects_entities(rgi_ids, version=None):
"""Get a list of glacier intersects selected from their RGI IDs.
Parameters
----------
rgi_ids: list of str
list of rgi_ids you want to look for intersections for
version: str
        '5', '6', '61'... defaults to the one specified in cfg.PARAMS
Returns
-------
geopandas.GeoDataFrame
with the selected intersects
"""
if version is None:
version = cfg.PARAMS['rgi_version']
if len(version) == 1:
version += '0'
regions = [s.split('-')[1].split('.')[0] for s in rgi_ids]
selection = []
for reg in sorted(np.unique(regions)):
sh = gpd.read_file(get_rgi_intersects_region_file(reg,
version=version))
selection.append(sh.loc[sh.RGIId_1.isin(rgi_ids) |
sh.RGIId_2.isin(rgi_ids)])
# Make a new dataframe of those
selection = | pd.concat(selection) | pandas.concat |
import pandas as pd
import os
import pickle
import numpy as np
from sklearn.metrics import det_curve, roc_auc_score
'''
This file plots results from a set of algorithm runs.
Specify the two variables at the top of the file:
results_root: Directory containing results from the run
figs_root: Directory to save test results on outlier datasets
'''
# Change these strings for plotting
results_root = 'results_ssnd_10'
figs_root = 'figs_ssnd_10'
def test_fnr_using_valid_threshold(valid_scores_in, valid_scores_out, test_scores_in, test_scores_out, fpr_cutoff=0.05):
valid_labels_in = np.zeros(len(valid_scores_in))
valid_labels_out = np.ones(len(valid_scores_out))
y_true_valid = np.concatenate([valid_labels_in, valid_labels_out])
y_score_valid = np.concatenate([valid_scores_in, valid_scores_out])
fpr, fnr, thresholds = det_curve(y_true=y_true_valid, y_score=y_score_valid)
idx = np.argmin(np.abs(fpr - fpr_cutoff))
t = thresholds[idx]
fpr_test = len(np.array(test_scores_in)[np.array(test_scores_in) >= t]) / len(test_scores_in)
fnr_test = len(np.array(test_scores_out)[np.array(test_scores_out) < t]) / len(test_scores_out)
return fnr_test
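# A minimal usage sketch (made-up score lists; higher scores are treated as
# more out-of-distribution, consistent with the ">= t" / "< t" comparisons
# above). The function picks the validation threshold whose FPR is closest to
# fpr_cutoff and reports the test FNR at that threshold:
#
#   fnr = test_fnr_using_valid_threshold(
#       valid_scores_in=[0.1, 0.2, 0.3], valid_scores_out=[0.7, 0.8, 0.9],
#       test_scores_in=[0.15, 0.25], test_scores_out=[0.05, 0.95],
#       fpr_cutoff=0.05)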
def compute_auroc(out_scores, in_scores):
in_labels = np.zeros(len(in_scores))
out_labels = np.ones(len(out_scores))
y_true = np.concatenate([in_labels, out_labels])
y_score = np.concatenate([in_scores, out_scores])
auroc = roc_auc_score(y_true=y_true, y_score=y_score)
return auroc
def load_results(results_root, figs_root):
files = []
valid_fnr = []
test_fnr = []
scores = []
for _, dsets, _ in os.walk(results_root):
for dset in dsets:
dset_dir = os.path.join(results_root, dset)
for _, out_dsets, _ in os.walk(dset_dir):
for out_dset in out_dsets:
out_dset_dir = os.path.join(dset_dir, out_dset)
                    for _, score_names, _ in os.walk(out_dset_dir):
                        for score in score_names:
score_dir = os.path.join(out_dset_dir, score)
for f in os.listdir(score_dir):
f_path = os.path.join(score_dir, f)
files.append(f_path)
results_all = []
num_files = len(files)
count = 0
for file in files:
if not file.endswith('pkl'):
continue
count += 1
# make corresponding fig directory for OOD score plots
d = os.path.normpath(file).split(os.path.sep)
fig_dir = os.path.join(figs_root, *d[1:-1])
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
with open(file, 'rb') as f:
print('[{}/{}] loading {}'.format(count, num_files, file))
try:
results = pickle.load(f)
except EOFError:
print('EOFError, skipping file')
continue
except pickle.UnpicklingError:
print('UnpicklingError, skipping file')
continue
# results from best validation epoch
e = results['best_epoch_valid']
# results['fnr_train_best_epoch_valid'] = results['fnr_train'][e]
results['min_valid_fnr'] = results['fnr_valid'][e]
# results['fnr_test_best_epoch_valid'] = results['fnr_test'][e]
results['train_acc_best_epoch_valid'] = results['train_accuracy'][e]
results['valid_acc_best_epoch_valid'] = results['valid_accuracy'][e]
results['test_acc_best_epoch_valid'] = results['test_accuracy'][e]
results['fnr_test_best_epoch_valid'] = test_fnr_using_valid_threshold(results['OOD_scores_P0_valid'][e],
results['OOD_scores_PX_valid'][e],
results['OOD_scores_P0_test'][e],
results['OOD_scores_Ptest'][e])
results['test_auroc_best_epoch_valid'] = compute_auroc(in_scores=results['OOD_scores_P0_test'][e],
out_scores=results['OOD_scores_Ptest'][e])
# results from best clean validation epoch
results['min_valid_fnr_clean'] = min(results['fnr_valid_clean'])
e = results['fnr_valid_clean'].index(results['min_valid_fnr_clean'])
results['best_epoch_valid_clean'] = e
results['train_acc_best_epoch_valid_clean'] = results['train_accuracy'][e]
results['valid_acc_best_epoch_valid_clean'] = results['valid_accuracy'][e]
results['test_acc_best_epoch_valid_clean'] = results['test_accuracy'][e]
results['fnr_test_best_epoch_valid_clean'] = test_fnr_using_valid_threshold(results['OOD_scores_P0_valid_clean'][e],
results['OOD_scores_PX_valid_clean'][e],
results['OOD_scores_P0_test'][e],
results['OOD_scores_Ptest'][e])
# results['fnr_test_best_epoch_valid_clean'] = results['fnr_test'][e]
results['test_auroc_best_epoch_valid_clean'] = compute_auroc(in_scores=results['OOD_scores_P0_test'][e],
out_scores=results['OOD_scores_Ptest'][e])
# results from best test epoch
e = results['best_epoch_test']
results['fnr_valid_best_epoch_test'] = results['fnr_valid'][e]
results['min_test_fnr'] = results['fnr_test'][e]
results['train_acc_best_epoch_test'] = results['train_accuracy'][e]
results['valid_acc_best_epoch_test'] = results['valid_accuracy'][e]
results['test_acc_best_epoch_test'] = results['test_accuracy'][e]
results['test_auroc_best_epoch_test'] = compute_auroc(in_scores=results['OOD_scores_P0_test'][e],
out_scores=results['OOD_scores_Ptest'][e])
# results from last epoch
e = results['epoch']
results['fnr_valid_last_epoch'] = results['fnr_valid'][e]
results['fnr_test_last_epoch'] = results['fnr_test'][e]
results['train_acc_last_epoch'] = results['train_accuracy'][e]
results['valid_acc_last_epoch'] = results['valid_accuracy'][e]
results['test_acc_last_epoch'] = results['test_accuracy'][e]
results['test_auroc_last_epoch'] = compute_auroc(in_scores=results['OOD_scores_P0_test'][e],
out_scores=results['OOD_scores_Ptest'][e])
# for plotting valid vs test FNR
valid_fnr.extend(results['fnr_valid'])
test_fnr.extend(results['fnr_test'])
scores.extend([results['pi']] * len(results['fnr_test']))
# save results
results_all.append(results)
return results_all, valid_fnr, test_fnr, scores
def save_all_results(results_all, figs_root):
results_df = pd.DataFrame(results_all)
results_save = results_df[['wandb_name',
'classification', 'pi', 'score', 'dataset', 'aux_out_dataset', 'test_out_dataset',
'epoch', 'epochs',
'false_alarm_cutoff', 'in_constraint_weight', 'out_constraint_weight', 'penalty_mult',
'lr_lam',
'oe_lambda',
'energy_vos_lambda',
'train_in_size', # 'train_out_size',
'valid_in_size', # 'valid_out_size',
'test_in_size', 'test_out_size',
'best_epoch_valid', 'min_valid_fnr', 'fnr_test_best_epoch_valid',
'train_acc_best_epoch_valid', 'valid_acc_best_epoch_valid', 'test_acc_best_epoch_valid',
'best_epoch_valid_clean', 'min_valid_fnr_clean', 'fnr_test_best_epoch_valid_clean',
'train_acc_best_epoch_valid_clean', 'valid_acc_best_epoch_valid_clean', 'test_acc_best_epoch_valid_clean',
'best_epoch_test', 'min_test_fnr', 'fnr_valid_best_epoch_test',
'train_acc_best_epoch_test', 'valid_acc_best_epoch_test', 'test_acc_best_epoch_test',
'fnr_valid_last_epoch', 'fnr_test_last_epoch', 'train_acc_last_epoch',
'valid_acc_last_epoch', 'test_acc_last_epoch',
'test_auroc_best_epoch_valid', 'test_auroc_best_epoch_valid_clean',
'test_auroc_best_epoch_test', 'test_auroc_last_epoch'
]]
results_save = results_save.sort_values(by=['classification', 'pi', 'score', 'dataset', 'aux_out_dataset',
'test_out_dataset', 'false_alarm_cutoff', 'in_constraint_weight',
'out_constraint_weight', 'lr_lam', 'energy_vos_lambda',
'oe_lambda']).reset_index(drop=True)
results_save.to_csv(os.path.join(figs_root, 'results_all.csv'), index=False)
return results_save
def save_min_fnr_results(results_save, figs_root):
idx_list = ['dataset', 'test_out_dataset', 'pi']
idx_list_score = idx_list + ['score']
scores = list(pd.unique(results_save['score']))
# min test FNR
test_min_fnr = pd.pivot_table(data=results_save,
values='min_test_fnr',
index=idx_list,
columns=['score'],
aggfunc='min').reset_index().sort_values(idx_list)
test_min_fnr = test_min_fnr[idx_list + scores]
test_min_fnr[scores] = test_min_fnr[scores] * 100
test_min_fnr.to_csv(os.path.join(figs_root, 'test_min_fnr.csv'), index=False)
# test auroc at min test FNR
test_min_fnr_idx = results_save.groupby(by=idx_list_score)['min_test_fnr'].idxmin()
results_test_min_fnr = results_save.iloc[test_min_fnr_idx]
test_auroc_at_test_min_fnr = pd.pivot_table(data=results_test_min_fnr,
values='test_auroc_best_epoch_test',
index=idx_list,
columns='score',
aggfunc='mean').reset_index().sort_values(idx_list)
test_auroc_at_test_min_fnr = test_auroc_at_test_min_fnr[idx_list + scores]
test_auroc_at_test_min_fnr[scores] = test_auroc_at_test_min_fnr[scores] * 100
test_auroc_at_test_min_fnr.to_csv(os.path.join(figs_root, 'test_auroc_at_test_min_fnr.csv'), index=False)
# test accuracy at min test FNR
test_acc_at_test_min_fnr = pd.pivot_table(data=results_test_min_fnr,
values='test_acc_best_epoch_test',
index=idx_list,
columns='score',
aggfunc='mean').reset_index().sort_values(idx_list)
test_acc_at_test_min_fnr = test_acc_at_test_min_fnr[idx_list + scores]
test_acc_at_test_min_fnr[scores] = test_acc_at_test_min_fnr[scores] * 100
test_acc_at_test_min_fnr.to_csv(os.path.join(figs_root, 'test_acc_at_test_min_fnr.csv'), index=False)
# test FNR at min valid FNR
valid_min_fnr_idx = results_save.groupby(by=idx_list_score)['min_valid_fnr'].idxmin()
results_valid_min_fnr = results_save.iloc[valid_min_fnr_idx]
test_fnr_at_valid_min_fnr = pd.pivot_table(data=results_valid_min_fnr,
values='fnr_test_best_epoch_valid',
index=idx_list,
columns='score',
aggfunc='mean').reset_index().sort_values(idx_list)
test_fnr_at_valid_min_fnr = test_fnr_at_valid_min_fnr[idx_list + scores]
test_fnr_at_valid_min_fnr[scores] = test_fnr_at_valid_min_fnr[scores] * 100
test_fnr_at_valid_min_fnr.to_csv(os.path.join(figs_root, 'test_fnr_at_valid_min_fnr.csv'), index=False)
# test auroc at min valid FNR
test_auroc_at_valid_min_fnr = pd.pivot_table(data=results_valid_min_fnr,
values='test_auroc_best_epoch_valid',
index=idx_list,
columns='score',
aggfunc='mean').reset_index().sort_values(idx_list)
test_auroc_at_valid_min_fnr = test_auroc_at_valid_min_fnr[idx_list + scores]
test_auroc_at_valid_min_fnr[scores] = test_auroc_at_valid_min_fnr[scores] * 100
test_auroc_at_valid_min_fnr.to_csv(os.path.join(figs_root, 'test_auroc_at_valid_min_fnr.csv'), index=False)
# test acc at min valid FNR
test_acc_at_valid_min_fnr = pd.pivot_table(data=results_valid_min_fnr,
values='test_acc_best_epoch_valid',
index=idx_list,
columns='score',
aggfunc='mean').reset_index().sort_values(idx_list)
test_acc_at_valid_min_fnr = test_acc_at_valid_min_fnr[idx_list + scores]
test_acc_at_valid_min_fnr[scores] = test_acc_at_valid_min_fnr[scores] * 100
test_acc_at_valid_min_fnr.to_csv(os.path.join(figs_root, 'test_acc_at_valid_min_fnr.csv'), index=False)
return test_min_fnr, test_acc_at_test_min_fnr, test_fnr_at_valid_min_fnr, test_acc_at_valid_min_fnr
def save_min_fnr_results_valid_clean(results_save, figs_root):
idx_list = ['dataset', 'test_out_dataset', 'pi']
idx_list_score = idx_list + ['score']
    scores = list(pd.unique(results_save['score']))
"""
Target Problem:
---------------
* To train a model to predict the brain connectivity for the next time point given the brain connectivity at current time point.
Proposed Solution (Machine Learning Pipeline):
----------------------------------------------
* Preprocessing Method (if any) -> Dimensionality Reduction method (if any) -> Learner
Input to Proposed Solution:
---------------------------
* Directories of training and testing data in csv file format
* These two types of data should be stored in n x m pattern in csv file format.
Typical Example:
----------------
n x m samples in training csv file (n: number of training samples, m: number of features)
k x s samples in testing csv file (k: number of test samples, s: number of features)
Output of Proposed Solution:
----------------------------
* Predictions generated by learning model for testing set
* They are stored in the "submission.csv" file. (Change the file name if needed.)
Code Owner:
-----------
* Copyright © <NAME>. All rights reserved.
* Copyright © Istanbul Technical University, Learning From Data Spring/Fall 2020. All rights reserved.
"""
from sklearn import random_projection, preprocessing
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.linear_model import ElasticNet, OrthogonalMatchingPursuit, SGDRegressor, Lars, BayesianRidge
from sklearn.linear_model import ARDRegression, PassiveAggressiveRegressor, RANSACRegressor
from sklearn.linear_model import Ridge, SGDRegressor, TheilSenRegressor, HuberRegressor
from sklearn.feature_selection import GenericUnivariateSelect,SelectFromModel, VarianceThreshold, RFECV, RFE
from sklearn.svm import SVC, SVR, NuSVR, LinearSVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import KFold
from sklearn.neighbors import KNeighborsRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.decomposition import PCA
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.isotonic import IsotonicRegression
from sklearn.multioutput import MultiOutputRegressor
from sklearn.metrics import mean_squared_error as mse
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_absolute_error as mae
from scipy.stats.stats import pearsonr
import xgboost as xgb
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import random as r
r.seed(1)
np.random.seed(1)
import warnings
warnings.filterwarnings("ignore")
def load_data(x_train, y_train, x_test):
"""
Parameters
----------
x_train: directory of training data t0
y_train: directory of training data t1
x_test: directory of test data t0
"""
train_t0 = pd.read_csv(x_train).loc[:,"f1":]
train_t1 = pd.read_csv(y_train).loc[:,"f1":]
test_t0 = pd.read_csv(x_test).loc[:,"f1":]
return train_t0, train_t1, test_t0
def preprocessing(x_tra, y_tra, x_tst):
"""
    * Selects the top 85% of features ranked by their mean value using
      GenericUnivariateSelect, then projects the result onto 21 principal
      components with PCA.
    * Both transformers are fit on the training data and applied to the test data.
----------
x_tra: features of training data
y_tra: labels of training data
x_tst: features of test data
"""
transformer = GenericUnivariateSelect(score_func=lambda X, y: X.mean(axis=0), mode='percentile', param=85)
X_train_new = transformer.fit_transform(x_tra, y_tra)
X_test_new = transformer.transform(x_tst)
transformer = PCA(n_components=21)
X_train_new = transformer.fit_transform(X_train_new)
X_test_new = transformer.transform(X_test_new)
return X_train_new, X_test_new
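# --- Illustrative sketch of the preprocessing step (random stand-in data) ----
# The real CSVs hold 595 connectome features per subject (columns f1..f595);
# random values are used here only to show the expected input/output shapes.
def _demo_preprocessing():
    rng = np.random.RandomState(1)
    cols = ["f" + str(i) for i in range(1, 596)]
    x_tra = pd.DataFrame(rng.rand(80, 595), columns=cols)
    y_tra = pd.DataFrame(rng.rand(80, 595), columns=cols)
    x_tst = pd.DataFrame(rng.rand(20, 595), columns=cols)
    X_train_new, X_test_new = preprocessing(x_tra, y_tra, x_tst)
    print(X_train_new.shape, X_test_new.shape)  # expected: (80, 21) (20, 21)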
def train_voting(X,y):
"""
The method creates a learning model and trains it by using training data.
Parameters
----------
X: preprocessed features of training data
y: features of training data
"""
# X after preprocessing
models = [
MultiOutputRegressor(AdaBoostRegressor(n_estimators=100, learning_rate=0.04)),
KNeighborsRegressor(algorithm='ball_tree', n_neighbors=24),
Lasso(alpha=0.001, tol=1e-10, max_iter=10000),
KNeighborsRegressor(n_neighbors=27, algorithm='kd_tree', weights='distance', leaf_size=15),
MultiOutputRegressor(BayesianRidge(tol=1e-2, n_iter=15)),
KNeighborsRegressor(n_neighbors=15, algorithm='brute', weights='distance', leaf_size=15),
MultiOutputRegressor(BayesianRidge(tol=1e-2, n_iter=50)),
KNeighborsRegressor(n_neighbors=35, algorithm='brute', weights='distance', leaf_size=15),
OrthogonalMatchingPursuit(),
MultiOutputRegressor(LGBMRegressor(objective='regression')),
MultiOutputRegressor(xgb.XGBRegressor()),
MultiOutputRegressor(BayesianRidge(tol=1e-2, n_iter=100)),
KNeighborsRegressor(algorithm='ball_tree', n_neighbors=24),
KNeighborsRegressor(n_neighbors=27, algorithm='kd_tree', weights='distance', leaf_size=15),
KNeighborsRegressor(n_neighbors=15, algorithm='brute', weights='distance', leaf_size=15)
]
for model in models:
model.fit(X,y)
return models
def voting_predict(X, models):
"""
Parameters
----------
X: preprocessed features of test data
models: trained models of voting algorithms
"""
pred=0
for model in models:
pred+=model.predict(X)
return pred/len(models)
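# --- Illustrative sketch: wiring preprocessing and the voting ensemble -------
# train_t0/train_t1/test_t0 are the DataFrames returned by load_data(); this
# only shows the intended call order of the functions defined above.
def _demo_voting(train_t0, train_t1, test_t0):
    X_train_new, X_test_new = preprocessing(train_t0, train_t1, test_t0)
    models = train_voting(X_train_new, train_t1)
    return voting_predict(X_test_new, models)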
def train_feature_based(X,y,sel_models):
"""
Parameters
----------
X: features of training data
y: features of training data
sel_models: selected models
We selected the best machine learning algorithms for each feature
and saved it to Selected_models.pickle file.
"""
trained_models = []
for i in range(1,596):
trained = sel_models[i-1].fit(X["f"+str(i)].values.reshape(-1, 1),y["f"+str(i)].values)
trained_models.append( trained )
return trained_models
def predict_feature_based(X,trained_best_models):
"""
Parameters
----------
X: features of test data
trained_best_models: 595 different trained model.
We selected the best machine learning algorithms for each feature
and trained these models with train_selected_models function.
"""
preds = []
for i in range(0,595):
prediction = trained_best_models[i].predict(X["f"+str(i+1)].values.reshape(-1, 1))
preds.append(prediction)
np_pred = np.array(preds).T
return np_pred
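# --- Illustrative sketch of the per-feature pipeline -------------------------
# "Selected_models.pickle" is not available here, so plain LinearRegression
# models stand in for the 595 per-feature learners chosen by the authors.
def _demo_feature_based(train_t0, train_t1, test_t0):
    sel_models = [LinearRegression() for _ in range(595)]
    trained_models = train_feature_based(train_t0, train_t1, sel_models)
    return predict_feature_based(test_t0, trained_models)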
def write_output(filename, prediction):
"""
Parameters
----------
filename: name of the csv file
prediction: prediction
"""
    sample_submission = pd.read_csv("sampleSubmission.csv")
import numpy as np
import os
import pandas as pd
# import eia
from datetime import datetime
import pytz
import json
from os.path import join
import zipfile
import requests
import logging
from electricitylci.globals import data_dir, output_dir
from electricitylci.bulk_eia_data import download_EBA, row_to_df, ba_exchange_to_df
from electricitylci.model_config import model_specs
import electricitylci.eia923_generation as eia923
import electricitylci.eia860_facilities as eia860
from electricitylci.process_dictionary_writer import *
"""
Merge generation and emissions data. Add region designations using either
eGRID or EIA-860. Same for primary fuel by plant (eGRID or 923). Calculate
and merge in the total generation by region. Create the column "Subregion"
to hold regional name info. Remove electricity flows. Rename flows and add
UUIDs according to the federal flow list.
Parameters
----------
year : int
Specified year to pull transaction data between balancing authorities
subregion : str
Description of a group of regions. Options include 'FERC' for all FERC
market regions, 'BA' for all balancing authorities.
Returns
-------
Dictionary of dataframes with import region, export region, transaction amount, total
imports for import region, and fraction of total. The dictionary keys
are the level of aggregation: "BA", "FERC", "US".
Sample output:
ferc_final_trade.head()
import ferc region export ferc region value total fraction
0 CAISO CAISO 2.662827e+08 3.225829e+08 0.825471
1 CAISO Canada 1.119572e+06 3.225829e+08 0.003471
2 CAISO ERCOT 0.000000e+00 3.225829e+08 0.000000
3 CAISO ISO-NE 0.000000e+00 3.225829e+08 0.000000
4 CAISO MISO 0.000000e+00 3.225829e+08 0.000000
"""
def ba_io_trading_model(year=None, subregion=None, regions_to_keep=None):
REGION_NAMES = [
'California', 'Carolinas', 'Central',
'Electric Reliability Council of Texas, Inc.', 'Florida',
'Mid-Atlantic', 'Midwest', 'New England ISO',
'New York Independent System Operator', 'Northwest', 'Southeast',
'Southwest', 'Tennessee Valley Authority'
]
REGION_ACRONYMS = [
'TVA', 'MIDA', 'CAL', 'CAR', 'CENT', 'ERCO', 'FLA',
'MIDW', 'ISNE', 'NYIS', 'NW', 'SE', 'SW',
]
if year is None:
year = model_specs.NETL_IO_trading_year
if subregion is None:
subregion = model_specs.regional_aggregation
if subregion not in ['BA', 'FERC','US']:
raise ValueError(
f'subregion or regional_aggregation must have a value of "BA" or "FERC" '
f'when calculating trading with input-output, not {subregion}'
)
# Read in BAA file which contains the names and abbreviations
df_BA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'US', header = 4)
df_BA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
    BA = pd.np.array(df_BA['BA_Acronym'])
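# --- Sketch (assumption): how the 'total' and 'fraction' columns shown in the
# sample output above can be derived from a table of BA-to-BA exchange values.
# The column names follow the sample output, not the module's internals.
def _demo_trade_fractions(trades):
    totals = trades.groupby('import ferc region')['value'].transform('sum')
    return trades.assign(total=totals, fraction=trades['value'] / totals)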
import pytest
import pandas as pd
from pandas.core.internals import ExtensionBlock
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseConstructorsTests(BaseExtensionTests):
def test_array_from_scalars(self, data):
scalars = [data[0], data[1], data[2]]
result = data._from_sequence(scalars)
assert isinstance(result, type(data))
def test_series_constructor(self, data):
        result = pd.Series(data)
#!/usr/bin/env python3
from pathlib import Path
import numpy as np
import pandas as pd
import pyranges as pr
from src import base_dir
from src.load import data
_anno_dir = base_dir / "data/peak_annos"
_anno_paths = {
"genomic_annos": _anno_dir / "genomic_annos.txt",
"chip_annos": _anno_dir / "chip_annos.txt",
"omic_counts_annos": _anno_dir / "omic_counts_annos.txt",
"corr_qtl_annos": _anno_dir / "corr_qtl_annos.txt",
}
class PeakAnnos:
def load_genomic_annos():
        return pd.read_csv(_anno_paths["genomic_annos"], sep="\t", index_col=0)
import abscplane
import numpy as np
import pandas as pd
class ArrayComplexPlane(abscplane.AbsComplexPlane):
def __init__(self,xmin,xmax,xlen,ymin,ymax,ylen):
self.xmin = float(xmin)
self.xmax = float(xmax)
self.xlen = int(xlen)
self.ymin = float(ymin)
self.ymax = float(ymax)
self.ylen = int(ylen)
# The implementation type of plane is up to the user
self.plane = self.__creategrid__()
# fs should be a list of functions, initialized to be empty
self.fs = []
def __creategrid__(self):
x = np.linspace(self.xmin,self.xmax,self.xlen)
y = np.linspace(self.ymin,self.ymax,self.ylen)
x,y = np.meshgrid(x,y)
        return pd.DataFrame(x+y*1j)
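# --- Illustrative sketch of the grid construction above ----------------------
# Builds the same kind of complex-plane grid without needing the abstract base
# class: a 3x3 plane spanning [-1, 1] x [-1, 1].
def _demo_grid(xmin=-1.0, xmax=1.0, xlen=3, ymin=-1.0, ymax=1.0, ylen=3):
    x = np.linspace(xmin, xmax, xlen)
    y = np.linspace(ymin, ymax, ylen)
    x, y = np.meshgrid(x, y)
    return pd.DataFrame(x + y * 1j)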
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
# python 2/3 compatibility
try:
basestring
except NameError:
basestring = str
import string
import os
import copy
import sys
import pandas as pds
import numpy as np
import xarray as xr
from . import _custom
from . import _files
from . import _orbits
from . import _meta
from . import utils
from pysat import data_dir
from pysat import DataFrame, Series
# main class for users
class Instrument(object):
"""Download, load, manage, modify and analyze science data.
Parameters
----------
platform : string
name of platform/satellite.
name : string
name of instrument.
tag : string, optional
identifies particular subset of instrument data.
sat_id : string, optional
identity within constellation
clean_level : {'clean','dusty','dirty','none'}, optional
level of data quality
pad : pandas.DateOffset, or dictionary, optional
Length of time to pad the begining and end of loaded data for
time-series processing. Extra data is removed after applying all
custom functions. Dictionary, if supplied, is simply passed to
pandas DateOffset.
orbit_info : dict
Orbit information, {'index':index, 'kind':kind, 'period':period}.
See pysat.Orbits for more information.
inst_module : module, optional
Provide instrument module directly.
Takes precedence over platform/name.
update_files : boolean, optional
If True, immediately query filesystem for instrument files and store.
temporary_file_list : boolean, optional
If true, the list of Instrument files will not be written to disk.
Prevents a race condition when running multiple pysat processes.
multi_file_day : boolean, optional
Set to True if Instrument data files for a day are spread across
multiple files and data for day n could be found in a file
with a timestamp of day n-1 or n+1.
manual_org : bool
if True, then pysat will look directly in pysat data directory
for data files and will not use default /platform/name/tag
directory_format : str
directory naming structure in string format. Variables such as
platform, name, and tag will be filled in as needed using python
string formatting. The default directory structure would be
expressed as '{platform}/{name}/{tag}'
file_format : str or NoneType
File naming structure in string format. Variables such as year,
month, and sat_id will be filled in as needed using python string
formatting. The default file format structure is supplied in the
instrument list_files routine.
units_label : str
String used to label units in storage. Defaults to 'units'.
name_label : str
        String used to label long_name in storage. Defaults to 'long_name'.
notes_label : str
label to use for notes in storage. Defaults to 'notes'
desc_label : str
label to use for variable descriptions in storage. Defaults to 'desc'
plot_label : str
label to use to label variables in plots. Defaults to 'label'
axis_label : str
label to use for axis on a plot. Defaults to 'axis'
scale_label : str
label to use for plot scaling type in storage. Defaults to 'scale'
min_label : str
label to use for typical variable value min limit in storage.
Defaults to 'value_min'
max_label : str
label to use for typical variable value max limit in storage.
Defaults to 'value_max'
fill_label : str
label to use for fill values. Defaults to 'fill' but some implementations
will use 'FillVal'
Attributes
----------
data : pandas.DataFrame
loaded science data
date : pandas.datetime
date for loaded data
yr : int
year for loaded data
bounds : (datetime/filename/None, datetime/filename/None)
bounds for loading data, supply array_like for a season with gaps
doy : int
day of year for loaded data
files : pysat.Files
interface to instrument files
meta : pysat.Meta
interface to instrument metadata, similar to netCDF 1.6
orbits : pysat.Orbits
interface to extracting data orbit-by-orbit
custom : pysat.Custom
interface to instrument nano-kernel
kwargs : dictionary
keyword arguments passed to instrument loading routine
Note
----
Pysat attempts to load the module platform_name.py located in
the pysat/instruments directory. This module provides the underlying
functionality to download, load, and clean instrument data.
Alternatively, the module may be supplied directly
using keyword inst_module.
Examples
--------
::
# 1-second mag field data
vefi = pysat.Instrument(platform='cnofs',
name='vefi',
tag='dc_b',
clean_level='clean')
start = pysat.datetime(2009,1,1)
stop = pysat.datetime(2009,1,2)
vefi.download(start, stop)
vefi.load(date=start)
print(vefi['dB_mer'])
print(vefi.meta['db_mer'])
# 1-second thermal plasma parameters
ivm = pysat.Instrument(platform='cnofs',
name='ivm',
tag='',
clean_level='clean')
ivm.download(start,stop)
ivm.load(2009,1)
print(ivm['ionVelmeridional'])
# Ionosphere profiles from GPS occultation
cosmic = pysat.Instrument('cosmic2013',
'gps',
'ionprf',
altitude_bin=3)
# bins profile using 3 km step
cosmic.download(start, stop, user=user, password=password)
cosmic.load(date=start)
"""
def __init__(self, platform=None, name=None, tag=None, sat_id=None,
clean_level='clean', update_files=None, pad=None,
orbit_info=None, inst_module=None, multi_file_day=None,
manual_org=None, directory_format=None, file_format=None,
temporary_file_list=False, units_label='units',
name_label='long_name', notes_label='notes', desc_label='desc',
plot_label='label', axis_label='axis', scale_label='scale',
min_label='value_min', max_label='value_max',
fill_label = 'fill', *arg, **kwargs):
if inst_module is None:
# use strings to look up module name
if isinstance(platform, str) and isinstance(name, str):
self.platform = platform.lower()
self.name = name.lower()
# look to module for instrument functions and defaults
self._assign_funcs(by_name=True)
elif (platform is None) and (name is None):
# creating "empty" Instrument object with this path
self.name = ''
self.platform = ''
self._assign_funcs()
else:
raise ValueError('Inputs platform and name must both be ' +
'strings, or both None.')
else:
# user has provided a module
try:
# platform and name are expected to be part of module
self.name = inst_module.name.lower()
self.platform = inst_module.platform.lower()
except AttributeError:
                raise AttributeError(' '.join(('A name and platform',
                                               'attribute for the',
                                               'instrument is required if',
                                               'supplying routine module',
                                               'directly.')))
# look to module for instrument functions and defaults
self._assign_funcs(inst_module=inst_module)
# more reasonable defaults for optional parameters
self.tag = tag.lower() if tag is not None else ''
self.sat_id = sat_id.lower() if sat_id is not None else ''
self.clean_level = (clean_level.lower() if clean_level is not None
else 'none')
# assign_func sets some instrument defaults, direct info rules all
if directory_format is not None:
self.directory_format = directory_format.lower()
# value not provided by user, check if there is a value provided by
# instrument module
elif self.directory_format is not None:
try:
# check if it is a function
self.directory_format = self.directory_format(tag, sat_id)
except TypeError:
pass
if file_format is not None:
self.file_format = file_format
# check to make sure value is reasonable
if self.file_format is not None:
# check if it is an iterable string. If it isn't formatted
# properly, raise Error
if (not isinstance(self.file_format, str) or
(self.file_format.find("{") < 0) or
(self.file_format.find("}") < 0)):
estr = 'file format set to default, supplied string must be '
                estr = '{:s}iterable [{:}]'.format(estr, self.file_format)
raise ValueError(estr)
# set up empty data and metadata
# check if pandas or xarray format
if self.pandas_format:
self._null_data = DataFrame(None)
self._data_library = DataFrame
else:
self._null_data = xr.Dataset(None)
self._data_library = xr.Dataset
self.data = self._null_data.copy()
# create Meta instance with appropriate labels
self.units_label = units_label
self.name_label = name_label
self.notes_label = notes_label
self.desc_label = desc_label
self.plot_label = plot_label
self.axis_label = axis_label
self.scale_label = scale_label
self.min_label = min_label
self.max_label = max_label
self.fill_label = fill_label
self.meta = _meta.Meta(units_label=self.units_label,
name_label=self.name_label,
notes_label=self.notes_label,
desc_label=self.desc_label,
plot_label=self.plot_label,
axis_label=self.axis_label,
scale_label=self.scale_label,
min_label=self.min_label,
max_label=self.max_label,
fill_label=self.fill_label)
# function processing class, processes data on load
self.custom = _custom.Custom()
# create arrays to store data around loaded day
# enables padding across day breaks with minimal loads
self._next_data = self._null_data.copy()
self._next_data_track = []
self._prev_data = self._null_data.copy()
self._prev_data_track = []
self._curr_data = self._null_data.copy()
# multi file day, default set by assign_funcs
if multi_file_day is not None:
self.multi_file_day = multi_file_day
# arguments for padding
if isinstance(pad, pds.DateOffset):
self.pad = pad
elif isinstance(pad, dict):
self.pad = pds.DateOffset(**pad)
elif pad is None:
self.pad = None
else:
estr = 'pad must be a dictionary or a pandas.DateOffset instance.'
raise ValueError(estr)
# instantiate Files class
manual_org = False if manual_org is None else manual_org
temporary_file_list = not temporary_file_list
self.files = _files.Files(self, manual_org=manual_org,
directory_format=self.directory_format,
update_files=update_files,
file_format=self.file_format,
write_to_disk=temporary_file_list)
# set bounds for iteration
# self.bounds requires the Files class
# setting (None,None) loads default bounds
self.bounds = (None, None)
self.date = None
self._fid = None
self.yr = None
self.doy = None
self._load_by_date = False
# initialize orbit support
if orbit_info is None:
if self.orbit_info is None:
# if default info not provided, set None as default
orbit_info = {'index': None, 'kind': None, 'period': None}
else:
# default provided by instrument module
orbit_info = self.orbit_info
self.orbits = _orbits.Orbits(self, **orbit_info)
# Create empty placeholder for meta translation table
# gives information about how to label metadata for netcdf export
# if None, pysat metadata labels will be used
self._meta_translation_table = None
# Create a placeholder for a post-processing function to be applied
# to the metadata dictionary before export. If None, no post-processing
# will occur
self._export_meta_post_processing = None
# store kwargs, passed to load routine
self.kwargs = kwargs
# run instrument init function, a basic pass function is used
# if user doesn't supply the init function
self._init_rtn(self)
# store base attributes, used in particular by Meta class
self._base_attr = dir(self)
def __getitem__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Examples
--------
::
# By name
inst['name']
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime1, 'name1':'name2']
"""
if self.pandas_format:
if isinstance(key, tuple):
# support slicing
return self.data.ix[key[0], key[1]]
else:
try:
# integer based indexing
return self.data.iloc[key]
except:
try:
# let pandas sort it out, presumption is key is
# a variable name, or iterable of variables
return self.data[key]
except:
estring = '\n'.join(("Unable to sort out data access.",
"Instrument has data : " +
str(not self.empty),
"Requested key : ", key))
raise ValueError(estring)
else:
return self.__getitem_xarray__(key)
def __getitem_xarray__(self, key):
"""
Convenience notation for accessing data; inst['name'] is inst.data.name
Examples
--------
::
# By name
inst['name']
# By position
inst[row_index, 'name']
# Slicing by row
inst[row1:row2, 'name']
# By Date
inst[datetime, 'name']
# Slicing by date, inclusive
inst[datetime1:datetime2, 'name']
# Slicing by name and row/date
inst[datetime1:datetime1, 'name1':'name2']
"""
if 'time' not in self.data:
return xr.Dataset(None)
if isinstance(key, tuple):
if len(key) == 2:
# support slicing time, variable name
try:
return self.data.isel(time=key[0])[key[1]]
except:
return self.data.sel(time=key[0])[key[1]]
else:
# multidimensional indexing
indict = {}
for i, dim in enumerate(self[key[-1]].dims):
indict[dim] = key[i]
return self.data[key[-1]][indict]
else:
try:
# grab a particular variable by name
return self.data[key]
except:
# that didn't work
try:
# get all data variables but for a subset of time
# using integer indexing
return self.data.isel(time=key)
except:
# subset of time, using label based indexing
return self.data.sel(time=key)
def __setitem__(self, key, new):
"""Convenience method for adding data to instrument.
Examples
--------
::
# Simple Assignment, default metadata assigned
# 'long_name' = 'name'
# 'units' = ''
inst['name'] = newData
# Assignment with Metadata
inst['name'] = {'data':new_data,
'long_name':long_name,
'units':units}
Note
----
If no metadata provided and if metadata for 'name' not already stored
then default meta information is also added,
long_name = 'name', and units = ''.
"""
# add data to main pandas.DataFrame, depending upon the input
# aka slice, and a name
if self.pandas_format:
if isinstance(key, tuple):
self.data.ix[key[0], key[1]] = new
self.meta[key[1]] = {}
return
elif not isinstance(new, dict):
# make it a dict to simplify downstream processing
new = {'data': new}
# input dict must have data in 'data',
# the rest of the keys are presumed to be metadata
in_data = new.pop('data')
if hasattr(in_data, '__iter__'):
if isinstance(in_data, pds.DataFrame):
pass
# filter for elif
elif isinstance(next(iter(in_data), None), pds.DataFrame):
# input is a list_like of frames
# this is higher order data
# this process ensures
if ('meta' not in new) and (key not in self.meta.keys_nD()):
# create an empty Meta instance but with variable names
# this will ensure the correct defaults for all
# subvariables. Meta can filter out empty metadata as
                        # needed, the check above reduces the need to create
# Meta instances
ho_meta = _meta.Meta(units_label=self.units_label,
name_label=self.name_label,
notes_label=self.notes_label,
desc_label=self.desc_label,
plot_label=self.plot_label,
axis_label=self.axis_label,
scale_label=self.scale_label,
fill_label=self.fill_label,
min_label=self.min_label,
max_label=self.max_label)
ho_meta[in_data[0].columns] = {}
self.meta[key] = ho_meta
# assign data and any extra metadata
self.data[key] = in_data
self.meta[key] = new
else:
# xarray format chosen for Instrument object
if not isinstance(new, dict):
new = {'data': new}
in_data = new.pop('data')
if isinstance(key, tuple):
# user provided more than one thing in assignment location
# something like, index integers and a variable name
# self[idx, 'variable'] = stuff
# or, self[idx1, idx2, idx3, 'variable'] = stuff
# construct dictionary of dimensions and locations for
# xarray standards
indict = {}
for i, dim in enumerate(self[key[-1]].dims):
indict[dim] = key[i]
# if dim == 'time':
# indict[dim] = self.index[key[i]]
try:
self.data[key[-1]].loc[indict] = in_data
except:
indict['time'] = self.index[indict['time']]
self.data[key[-1]].loc[indict] = in_data
self.meta[key[-1]] = new
return
elif isinstance(key, basestring):
# assigning basic variable
# if xarray input, take as is
if isinstance(in_data, xr.DataArray):
self.data[key] = in_data
# ok, not an xarray input
# but if we have an iterable input, then we
# go through here
elif len(np.shape(in_data)) == 1:
# looking at a 1D input here
if len(in_data) == len(self.index):
# 1D input has the correct length for storage along
# 'time'
self.data[key] = ('time', in_data)
elif len(in_data) == 1:
# only provided a single number in iterable, make that
# the input for all times
self.data[key] = ('time', [in_data[0]]*len(self.index))
elif len(in_data) == 0:
# provided an empty iterable
# make everything NaN
self.data[key] = ('time', [np.nan]*len(self.index))
# not an iterable input
elif len(np.shape(in_data)) == 0:
# not given an iterable at all, single number
# make that number the input for all times
self.data[key] = ('time', [in_data]*len(self.index))
else:
# multidimensional input that is not an xarray
# user needs to provide what is required
if isinstance(in_data, tuple):
self.data[key] = in_data
else:
raise ValueError('Must provide dimensions for xarray ' +
'multidimensional data using input ' +
'tuple.')
elif hasattr(key, '__iter__'):
# multiple input strings (keys) are provided, but not in tuple
# form recurse back into this function, setting each
# input individually
for keyname in key:
self.data[keyname] = in_data[keyname]
# attach metadata
self.meta[key] = new
@property
def empty(self):
"""Boolean flag reflecting lack of data.
True if there is no Instrument data."""
if self.pandas_format:
return self.data.empty
else:
if 'time' in self.data.indexes:
return len(self.data.indexes['time']) == 0
else:
return True
def _empty(self, data=None):
"""Boolean flag reflecting lack of data.
True if there is no Instrument data."""
if data is None:
data = self.data
if self.pandas_format:
return data.empty
else:
if 'time' in data.indexes:
return len(data.indexes['time']) == 0
else:
return True
@property
def index(self):
"""Returns time index of loaded data."""
if self.pandas_format:
return self.data.index
else:
if 'time' in self.data.indexes:
return self.data.indexes['time']
else:
return pds.Index([])
def _index(self, data=None):
"""Returns time index of loaded data."""
if data is None:
data = self.data
if self.pandas_format:
return data.index
else:
if 'time' in data.indexes:
return data.indexes['time']
else:
return pds.Index([])
@property
def variables(self):
"""Returns list of variables within loaded data."""
if self.pandas_format:
return self.data.columns
else:
return list(self.data.variables.keys())
def copy(self):
"""Deep copy of the entire Instrument object."""
return copy.deepcopy(self)
def concat_data(self, data, *args, **kwargs):
"""Concats data1 and data2 for xarray or pandas as needed"""
if self.pandas_format:
return pds.concat(data, *args, **kwargs)
else:
return xr.concat(data, dim='time')
def _pass_func(*args, **kwargs):
pass
def _assign_funcs(self, by_name=False, inst_module=None):
"""Assign all external science instrument methods to Instrument object.
"""
import importlib
# set defaults
self._list_rtn = self._pass_func
self._load_rtn = self._pass_func
self._default_rtn = self._pass_func
self._clean_rtn = self._pass_func
self._init_rtn = self._pass_func
self._download_rtn = self._pass_func
# default params
self.directory_format = None
self.file_format = None
self.multi_file_day = False
self.orbit_info = None
self.pandas_format = True
if by_name:
# look for code with filename name, any errors passed up
inst = importlib.import_module(''.join(('.', self.platform, '_',
self.name)),
package='pysat.instruments')
elif inst_module is not None:
# user supplied an object with relevant instrument routines
inst = inst_module
else:
# no module or name info, default pass functions assigned
return
try:
self._load_rtn = inst.load
self._list_rtn = inst.list_files
self._download_rtn = inst.download
except AttributeError:
estr = 'A load, file_list, and download routine are required for '
raise AttributeError('{:s}every instrument.'.format(estr))
try:
self._default_rtn = inst.default
except AttributeError:
pass
try:
self._init_rtn = inst.init
except AttributeError:
pass
try:
self._clean_rtn = inst.clean
except AttributeError:
pass
# look for instrument default parameters
try:
self.directory_format = inst.directory_format
except AttributeError:
pass
try:
self.multi_file_day = inst.multi_file_day
except AttributeError:
pass
try:
self.orbit_info = inst.orbit_info
except AttributeError:
pass
try:
self.pandas_format = inst.pandas_format
except AttributeError:
pass
return
def __str__(self):
output_str = '\npysat Instrument object\n'
output_str += '-----------------------\n'
output_str += 'Platform: '+self.platform+'\n'
output_str += 'Name: '+self.name+'\n'
output_str += 'Tag: '+self.tag+'\n'
output_str += 'Satellite id: '+self.sat_id+'\n'
output_str += '\nData Processing\n'
output_str += '---------------\n'
output_str += 'Cleaning Level: ' + self.clean_level + '\n'
output_str += 'Data Padding: ' + self.pad.__repr__() + '\n'
output_str += 'Keyword Arguments Passed to load(): '
output_str += self.kwargs.__repr__() +'\nCustom Functions : \n'
if len(self.custom._functions) > 0:
for func in self.custom._functions:
output_str += ' ' + func.__repr__() + '\n'
else:
output_str += ' ' + 'No functions applied.\n'
output_str += '\nOrbit Settings' + '\n'
output_str += '--------------' + '\n'
if self.orbit_info is None:
output_str += 'Orbit properties not set.\n'
else:
output_str += 'Orbit Kind: ' + self.orbit_info['kind'] + '\n'
output_str += 'Orbit Index: ' + self.orbit_info['index'] + '\n'
output_str += 'Orbit Period: '
output_str += self.orbit_info['period'].__str__() + '\n'
output_str += 'Number of Orbits: {:d}\n'.format(self.orbits.num)
output_str += 'Loaded Orbit Number: '
if self.orbits.current is not None:
output_str += '{:d}\n'.format(self.orbits.current)
else:
output_str += 'None\n'
output_str += '\nLocal File Statistics' + '\n'
output_str += '---------------------' + '\n'
output_str += 'Number of files: ' + str(len(self.files.files)) + '\n'
if len(self.files.files) > 0:
output_str += 'Date Range: '
output_str += self.files.files.index[0].strftime('%m/%d/%Y')
output_str += ' --- '
output_str += self.files.files.index[-1].strftime('%m/%d/%Y')
output_str += '\n\nLoaded Data Statistics'+'\n'
output_str += '----------------------'+'\n'
if not self.empty:
# if self._fid is not None:
# output_str += 'Filename: ' +
output_str += 'Date: ' + self.date.strftime('%m/%d/%Y') + '\n'
output_str += 'DOY: {:03d}'.format(self.doy) + '\n'
output_str += 'Time range: '
output_str += self.index[0].strftime('%m/%d/%Y %H:%M:%S')
output_str += ' --- '
output_str += self.index[-1].strftime('%m/%d/%Y %H:%M:%S')+'\n'
output_str += 'Number of Times: ' + str(len(self.index)) + '\n'
output_str += 'Number of variables: ' + str(len(self.variables))
output_str += '\n\nVariable Names:'+'\n'
num = len(self.variables)//3
for i in np.arange(num):
output_str += self.variables[3 * i].ljust(30)
output_str += self.variables[3 * i + 1].ljust(30)
output_str += self.variables[3 * i + 2].ljust(30)+'\n'
for i in np.arange(len(self.variables) - 3 * num):
output_str += self.variables[i+3*num].ljust(30)
output_str += '\n'
else:
output_str += 'No loaded data.'+'\n'
output_str += '\n'
return output_str
def _filter_datetime_input(self, date):
"""
Returns datetime that only includes year, month, and day.
Parameters
----------
date : datetime
Returns
-------
datetime
Only includes year, month, and day from original input
"""
return pds.datetime(date.year, date.month, date.day)
def today(self):
"""Returns today's date, with no hour, minute, second, etc.
Parameters
----------
None
Returns
-------
datetime
Today's date
"""
return self._filter_datetime_input(pds.datetime.today())
def tomorrow(self):
"""Returns tomorrow's date, with no hour, minute, second, etc.
Parameters
----------
None
Returns
-------
datetime
Tomorrow's date
"""
return self.today()+pds.DateOffset(days=1)
def yesterday(self):
"""Returns yesterday's date, with no hour, minute, second, etc.
Parameters
----------
None
Returns
-------
datetime
Yesterday's date
"""
return self.today()-pds.DateOffset(days=1)
def _load_data(self, date=None, fid=None):
"""
Load data for an instrument on given date or fid, dependng upon input.
Parameters
----------
date : (dt.datetime.date object or NoneType)
file date
fid : (int or NoneType)
filename index value
Returns
--------
data : (pds.DataFrame)
pysat data
meta : (pysat.Meta)
pysat meta data
"""
if fid is not None:
# get filename based off of index value
fname = self.files[fid:fid+1]
elif date is not None:
fname = self.files[date: date+pds.DateOffset(days=1)]
else:
raise ValueError('Must supply either a date or file id number.')
if len(fname) > 0:
load_fname = [os.path.join(self.files.data_path, f) for f in fname]
data, mdata = self._load_rtn(load_fname, tag=self.tag,
sat_id=self.sat_id, **self.kwargs)
# ensure units and name are named consistently in new Meta
# object as specified by user upon Instrument instantiation
mdata.accept_default_labels(self)
else:
data = self._null_data.copy()
mdata = _meta.Meta(units_label=self.units_label,
name_label=self.name_label,
notes_label = self.notes_label,
desc_label = self.desc_label,
plot_label = self.plot_label,
axis_label = self.axis_label,
scale_label = self.scale_label,
min_label = self.min_label,
max_label = self.max_label,
fill_label=self.fill_label)
output_str = '{platform} {name} {tag} {sat_id}'
output_str = output_str.format(platform=self.platform,
name=self.name, tag=self.tag,
sat_id=self.sat_id)
# check that data and metadata are the data types we expect
if not isinstance(data, self._data_library):
raise TypeError(' '.join(('Data returned by instrument load',
                                      'routine must be a', str(self._data_library))))
if not isinstance(mdata, _meta.Meta):
raise TypeError('Metadata returned must be a pysat.Meta object')
# let user know if data was returned or not
if len(data) > 0:
if date is not None:
output_str = ' '.join(('Returning', output_str, 'data for',
date.strftime('%x')))
else:
if len(fname) == 1:
# this check was zero
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0]))
else:
output_str = ' '.join(('Returning', output_str, 'data from',
fname[0], '::', fname[-1]))
else:
# no data signal
output_str = ' '.join(('No', output_str, 'data for',
date.strftime('%m/%d/%y')))
# remove extra spaces, if any
output_str = " ".join(output_str.split())
print (output_str)
return data, mdata
def _load_next(self):
"""Load the next days data (or file) without incrementing the date.
Repeated calls will not advance date/file and will produce the same data
Uses info stored in object to either increment the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
next_date = self.date + pds.DateOffset(days=1)
return self._load_data(date=next_date)
else:
return self._load_data(fid=self._fid+1)
def _load_prev(self):
"""Load the next days data (or file) without decrementing the date.
Repeated calls will not decrement date/file and will produce the same
data
Uses info stored in object to either decrement the date,
or the file. Looks for self._load_by_date flag.
"""
if self._load_by_date:
prev_date = self.date - pds.DateOffset(days=1)
return self._load_data(date=prev_date)
else:
return self._load_data(fid=self._fid-1)
def _set_load_parameters(self, date=None, fid=None):
self.date = date
self._fid = fid
if date is not None:
year, doy = utils.getyrdoy(date)
self.yr = year
self.doy = doy
self._load_by_date = True
else:
self.yr = None
self.doy = None
self._load_by_date = False
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
"""
# set options used by loading routine based upon user input
if date is not None:
# ensure date portion from user is only year, month, day
self._set_load_parameters(date=self._filter_datetime_input(date),
fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None,
fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
estr = 'Must supply a yr,doy pair, or datetime object, or filename'
estr = '{:s} to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
        loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
# -*- coding: utf-8 -*-
"""Tracker REITs (Real Estate Investment Trust) investments."""
import datetime
import os
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import pandas as pd
import requests
import requests_cache
from .portutils import NUM_PROCESS, Singleton, UnitsTransactions
# Cache for dividends
CACHE_EXPIRE_DAYS = 15
requests_cache.install_cache(
cache_name="fiidiv",
backend="sqlite",
expire_after=datetime.timedelta(days=CACHE_EXPIRE_DAYS),
)
# Default fii portfolio csv file
# It is used if transaction file is not set
# on environment variable FII_TRANSACTIONS
CSV_FILE = "example_transactions/fii_transactions.csv"
class FiiDividends:
"""Class to handle dividends."""
URL = "https://mfinance.com.br/api/v1/fiis/dividends"
def __init__(self):
"""Initialize fii dividends class."""
self.dividends = {}
def load_dividends(self, ticker):
"""
Download all dividends paid out for a ticker.
Parameters:
ticker (str): FII ticker
"""
ticker_url = "{}/{}".format(self.URL, ticker)
print("Getting dividends: ", ticker_url)
# To not use request cache to get "current" price,
# comment next line and uncomment the other
df_tmp = pd.read_json(requests.get(ticker_url).content)
# df_tmp = pd.read_json(ticker_url)
df_dividends = pd.json_normalize(df_tmp.dividends)
df_dividends["payDate"] = pd.to_datetime(df_dividends["payDate"])
df_dividends["declaredDate"] = | pd.to_datetime(df_dividends["declaredDate"]) | pandas.to_datetime |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from autorank import autorank, create_report, plot_stats, latex_table
np.random.seed(42)
pd.set_option('display.max_columns', 7)
std = 0.3
means = [0.2, 0.3, 0.5, 0.8, 0.85, 0.9]
sample_size = 50
data = pd.DataFrame()
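# --- Continuation sketch (follows the standard autorank usage pattern; the
# population construction below is an assumption, not part of the original file)
for i, mean in enumerate(means):
    data['pop_%i' % i] = np.random.normal(mean, std, sample_size).clip(0, 1)
result = autorank(data, alpha=0.05, verbose=False)
create_report(result)
plot_stats(result)
plt.show()
latex_table(result)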
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas._libs.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import (
array_equivalent, isnull, notnull,
na_value_for_dtype)
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(object):
def test_0d_array(self):
assert isnull(np.array(np.nan))
assert not isnull(np.array(0.0))
assert not isnull(np.array(0))
# test object dtype
assert isnull(np.array(np.nan, dtype=object))
assert not isnull(np.array(0.0, dtype=object))
assert not isnull(np.array(0, dtype=object))
def test_empty_object(self):
for shape in [(4, 0), (4,)]:
arr = np.empty(shape=shape, dtype=object)
result = isnull(arr)
expected = np.ones(shape=shape, dtype=bool)
tm.assert_numpy_array_equal(result, expected)
def test_isnull(self):
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert float('nan')
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
                  tm.makeObjectSeries(), tm.makeTimeSeries(),
                  tm.makePeriodSeries()]:
            assert (isinstance(isnull(s), Series))
"""
Main functions for interacting with LAtools.
(c) <NAME> : https://github.com/oscarbranson
"""
import configparser
import itertools
import inspect
import json
import os
import re
import time
import warnings
import dateutil
import textwrap
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import pkg_resources as pkgrs
import uncertainties as unc
import uncertainties.unumpy as un
from sklearn.preprocessing import minmax_scale, scale
from sklearn.cluster import KMeans
from scipy.optimize import curve_fit
from .helpers import plot
from .filtering import filters
from .filtering.classifier_obj import classifier
from .processes import read_data
from .preprocessing.split import long_file
from .D_obj import D
from .helpers import Bunch
from .helpers.plot import rangecalc
from .helpers.signal import rolling_window, enumerate_bool, calc_grads
from .helpers import logging
from .helpers.logging import _log
from .helpers.config import read_configuration, config_locator
from .helpers.stat_fns import *
from .helpers import utils
from .helpers import srm as srms
from .helpers.progressbars import progressbar
from .helpers.chemistry import analyte_mass, decompose_molecule
from .helpers.analytes import get_analyte_name, analyte_2_massname, pretty_element, unitpicker, analyte_sort_fn, analyte_checker, split_analyte_ratios
from .helpers.io import get_date
idx = pd.IndexSlice # multi-index slicing!
# deactivate IPython deprecations warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
# deactivate numpy invalid comparison warnings
np.seterr(invalid='ignore')
# TODO: Allow full sklearn integration by allowing sample-wise application of custom classifiers. i.e. Provide data collection (get_data) and filter addition API.
# Especially: PCA, Gaussian Mixture Models
# TODO: Move away from single `internal_standard` specification towards specifying multiple internal standards.
# TODO: Add 'smooth all' function.
class analyse(object):
"""
For processing and analysing whole LA - ICPMS datasets.
Parameters
----------
data_path : str
The path to a directory containing multiple data files.
errorhunt : bool
If True, latools prints the name of each file before it
imports the data. This is useful for working out which
data file is causing the import to fail.
config : str
The name of the configuration to use for the analysis.
This determines which configuration set from the
latools.cfg file is used, and overrides the default
        configuration setup. You might specify this if your lab
routinely uses two different instruments.
dataformat : str or dict
Either a path to a data format file, or a
dataformat dict. See documentation for more details.
extension : str
The file extension of your data files. Defaults to
'.csv'.
srm_identifier : str
A string used to separate samples and standards. srm_identifier
must be present in all standard measurements. Defaults to
'STD'.
cmap : dict
A dictionary of {analyte: colour} pairs. Colour can be any valid
matplotlib colour string, RGB or RGBA sequence, or hex string.
time_format : str
A regex string identifying the time format, used by pandas when
created a universal time scale. If unspecified (None), pandas
attempts to infer the time format, but in some cases this might
not work.
internal_standard : str
The name of the analyte used as an internal standard throughout
analysis.
file_structure : str
        This specifies whether latools should expect multiple files in a folder ('multi')
or a single file containing multiple analyses ('long'). Default is 'multi'.
names : str or array-like
If file_structure is 'multi', this should be either:
* 'file_names' : use the file names as labels (default)
            * 'metadata_names' : use the 'names' attribute of metadata as the name
anything else : use numbers.
If file_structure is 'long', this should be a list of names for the ablations
in the file. The wildcards '+' and '*' are supported in file names, and are used
when the number of ablations does not match the number of sample names provided.
If a sample name contains '+', all ablations that are not specified in the list
are combined into a single file and given this name. If a sample name contains '*'
these are analyses are numbered sequentially and split into separate files.
For example, if you have 5 ablations with one standard at the start and stop you
could provide one of:
* names = ['std', 'sample+', 'std'], which would divide the long file into [std, sample (containing three ablations), std].
            * names = ['std', 'sample*', 'std'], which would divide the long file into [std, sample0, sample1, sample2, std], where each
name is associated with a single ablation.
split_kwargs : dict
Arguments to pass to latools.split.long_file()
Attributes
----------
path : str
Path to the directory containing the data files, as
specified by `data_path`.
dirname : str
The name of the directory containing the data files,
without the entire path.
files : array_like
A list of all files in `folder`.
param_dir : str
The directory where parameters are stored.
report_dir : str
The directory where plots are saved.
data : dict
A dict of `latools.D` data objects, labelled by sample
name.
samples : array_like
A list of samples.
analytes : array_like
A list of analytes measured.
stds : array_like
        A list of the `latools.D` objects containing the SRM
data. These must contain srm_identifier in the file name.
srm_identifier : str
A string present in the file names of all standards.
cmaps : dict
An analyte - specific colour map, used for plotting.
"""
def __init__(self, data_path, errorhunt=False, config='DEFAULT',
dataformat=None, extension='.csv', srm_identifier='STD',
cmap=None, time_format=None, internal_standard='Ca43',
file_structure='multi', names='file_names', srm_file=None, pbar=None, split_kwargs={}):
"""
For processing and analysing whole LA-ICPMS datasets.
"""
# initialise log
params = {k: v for k, v in locals().items() if k not in ['self', 'pbar']}
self.log = ['__init__ :: args=() kwargs={}'.format(str(params))]
# assign file paths
self.path = os.path.realpath(data_path)
self.parent_folder = os.path.dirname(self.path)
# set line length for outputs
self._line_width = 80
# make output directories
self.report_dir = re.sub('//', '/',
os.path.join(self.parent_folder,
os.path.splitext(os.path.basename(self.path))[0] + '_reports/'))
if not os.path.isdir(self.report_dir):
os.mkdir(self.report_dir)
self.export_dir = re.sub('//', '/',
os.path.join(self.parent_folder,
os.path.splitext(os.path.basename(self.path))[0] + '_export/'))
if not os.path.isdir(self.export_dir):
os.mkdir(self.export_dir)
# set up file paths
self._file_internal_standard_massfrac = os.path.join(self.export_dir, 'internal_standard_massfrac.csv')
# load configuration parameters
self.config = read_configuration(config)
# print some info about the analysis and setup.
startmsg = self._fill_line('-') + 'Starting analysis:'
if srm_file is None or dataformat is None:
startmsg += '\n Using {} configuration'.format(self.config['config'])
if config == 'DEFAULT':
startmsg += ' (default).'
else:
startmsg += '.'
pretext = ' with'
else:
pretext = 'Using'
if srm_file is not None:
startmsg += '\n ' + pretext + ' custom srm_file ({})'.format(srm_file)
if isinstance(dataformat, str):
startmsg += '\n ' + pretext + ' custom dataformat file ({})'.format(dataformat)
elif isinstance(dataformat, dict):
startmsg += '\n ' + pretext + ' custom dataformat dict'
print(startmsg)
self._load_srmfile(srm_file)
self._load_dataformat(dataformat)
# link up progress bars
if pbar is None:
self.pbar = progressbar()
else:
self.pbar = pbar
if file_structure == 'multi':
self.files = np.array([f for f in os.listdir(self.path)
if extension in f])
# load data into list (initialise D objects)
with self.pbar.set(total=len(self.files), desc='Loading Data') as prog:
data = [None] * len(self.files)
for i, f in enumerate(self.files):
data_passthrough = read_data(data_file=os.path.join(self.path, f), dataformat=self.dataformat, name_mode=names)
data[i] = D(passthrough=(f, *data_passthrough))
# data[i] = (D(os.path.join(self.path, f),
# dataformat=self.dataformat,
# errorhunt=errorhunt,
# cmap=cmap,
# internal_standard=internal_standard,
# name=names))
prog.update()
elif file_structure == 'long':
data = []
print(self.path)
for data_passthrough in long_file(data_file=self.path, dataformat=self.dataformat, sample_list=names, passthrough=True, **split_kwargs):
data.append(D(passthrough=data_passthrough))
# create universal time scale
if 'date' in data[0].meta.keys():
if (time_format is None) and ('time_format' in self.dataformat.keys()):
time_format = self.dataformat['time_format']
start_times = []
for d in data:
start_times.append(get_date(d.meta['date'], time_format))
min_time = min(start_times)
for d, st in zip(data, start_times):
d.uTime = d.Time + (st - min_time).seconds
else:
ts = 0
for d in data:
d.uTime = d.Time + ts
ts += d.Time[-1]
msg = self._wrap_text(
"Time not determined from dataformat. Universal time scale " +
"approximated as continuously measured samples. " +
"Samples might not be in the right order. "
"Background correction and calibration may not behave " +
"as expected.")
warnings.warn(self._wrap_msg(msg, '*'))
self.max_time = max([d.uTime.max() for d in data])
# sort data by uTime
data.sort(key=lambda d: d.uTime[0])
# process sample names
if (names == 'file_names') | (names == 'metadata_names'):
samples = np.array([s.sample for s in data], dtype=object) # get all sample names
# if duplicates, rename them
usamples, ucounts = np.unique(samples, return_counts=True)
if usamples.size != samples.size:
dups = usamples[ucounts > 1] # identify duplicates
nreps = ucounts[ucounts > 1] # identify how many times they repeat
for d, n in zip(dups, nreps): # cycle through duplicates
new = [d + '_{}'.format(i) for i in range(n)] # append number to duplicate names
ind = samples == d
samples[ind] = new # rename in samples
for s, ns in zip([data[i] for i in np.where(ind)[0]], new):
s.sample = ns # rename in D objects
elif file_structure == 'long':
samples = np.array([s.sample for s in data], dtype=object)
else:
samples = np.arange(len(data)) # assign a range of numbers
for i, s in enumerate(samples):
data[i].sample = s
self.samples = samples
# copy colour map to top level
self.cmaps = data[0].cmap
# get analytes
# TODO: does this preserve the *order* of the analytes?
all_analytes = set()
extras = set()
for d in data:
all_analytes.update(d.analytes)
extras.update(all_analytes.symmetric_difference(d.analytes))
self.analytes = all_analytes.difference(extras)
mismatch = []
if self.analytes != all_analytes:
smax = 0
for d in data:
if d.analytes != self.analytes:
mismatch.append((d.sample, d.analytes.difference(self.analytes)))
if len(d.sample) > smax:
smax = len(d.sample)
msg = (self._fill_line('*') +
'All data files do not contain the same analytes.\n' +
'Only analytes present in all files will be processed.\n' +
'In the following files, these analytes will be excluded:\n')
for s, a in mismatch:
msg += (' {0: <' + '{:}'.format(smax + 2) + '}: ').format(s) + str(a) + '\n'
msg += self._fill_line('*')
warnings.warn(msg)
# set for recording calculated ratios
self.analyte_ratios = set()
self.uncalibrated = set()
if len(self.analytes) == 0:
raise ValueError('No analyte names identified. Please check the \ncolumn_id > pattern ReGeX in your dataformat file.')
if internal_standard in self.analytes:
self.internal_standard = internal_standard
else:
self.internal_standard = None
warnings.warn(
self._wrap_text(f'The specified internal_standard {internal_standard} is not in the list of analytes ({self.analytes}). You will have to specify a valid analyte when calling the `ratio()` function later in the analysis.')
)
self.internal_standard_concs = None
self.minimal_analytes = set()
# record which analytes are needed for calibration
self.calibration_analytes = set()
# keep record of which stages of processing have been performed
self.stages_complete = set(['rawdata'])
# From this point on, data stored in dicts
self.data = Bunch(zip(self.samples, data))
# remove mismatch analytes - QUICK-FIX - SHOULD BE DONE HIGHER UP?
for s, a in mismatch:
self.data[s].analytes = self.data[s].analytes.difference(a)
# get SRM info
self.srm_identifier = srm_identifier
self.stds = [] # make this a dict
_ = [self.stds.append(s) for s in self.data.values()
if self.srm_identifier in s.sample]
self.srms_ided = False
# set up focus_stage recording
self.focus_stage = 'rawdata'
self.stat_focus_stage = None
self.focus = Bunch()
# set up subsets
self.clear_subsets()
# remove any analytes for which all counts are zero
# self.get_focus()
# for a in self.analytes:
# if np.nanmean(self.focus[a] == 0):
# self.analytes.remove(a)
# warnings.warn('{} contains no data - removed from analytes')
# initialise classifiers
self.classifiers = Bunch()
# report
print(('Loading Data:\n {:d} Data Files Loaded: {:d} standards, {:d} '
'samples').format(len(self.data),
len(self.stds),
len(self.data) - len(self.stds)))
astr = self._wrap_text('Analytes: ' + ' '.join(self.analytes_sorted()))
print(astr)
print(' Internal Standard: {}'.format(self.internal_standard))
def _fill_line(self, char, newline=True):
"""Generate a full line of given character"""
if newline:
return char * self._line_width + '\n'
else:
return char * self._line_width
def _wrap_text(self, text):
"""Splits text over multiple lines to fit within self._line_width"""
return '\n'.join(textwrap.wrap(text, width=self._line_width,
break_long_words=False))
def _wrap_msg(self, msg, char):
return self._fill_line(char) + msg + '\n' + self._fill_line(char, False)
def _load_dataformat(self, dataformat):
"""
Load in dataformat.
Check dataformat file exists, and store it in a class attribute.
If dataformat is not provided during initialisation, assign it
from the configuration file.
"""
if dataformat is None:
if os.path.exists(self.config['dataformat']):
dataformat = self.config['dataformat']
elif os.path.exists(pkgrs.resource_filename('latools',
self.config['dataformat'])):
dataformat = pkgrs.resource_filename('latools',
self.config['dataformat'])
else:
config_file = config_locator()
raise ValueError(('The dataformat file specified in the ' +
self.config['config'] + ' configuration cannot be found.\n'
'Please make sure the file exists, and that '
'the path in the config file is correct.\n'
'Your configurations can be found here:'
' {}\n'.format(config_file)))
self.dataformat_file = dataformat
else:
self.dataformat_file = 'None: dict provided'
# if it's a string, check the file exists and import it.
if isinstance(dataformat, str):
if os.path.exists(dataformat):
# self.dataformat = eval(open(dataformat).read())
self.dataformat = json.load(open(dataformat))
else:
warnings.warn(("The dataformat file (" + dataformat +
") cannot be found.\nPlease make sure the file "
"exists, and that the path is correct.\n\nFile "
"Path: " + dataformat))
# if it's a dict, just assign it straight away.
elif isinstance(dataformat, dict):
self.dataformat = dataformat
def _load_srmfile(self, srm_file):
"""
Check srmfile exists, and store it in a class attribute.
"""
if srm_file is not None:
if os.path.exists(srm_file):
self.srmfile = srm_file
else:
raise ValueError(('Cannot find the specified SRM file:\n ' +
srm_file +
'\nPlease check that the file location is correct.'))
else:
if os.path.exists(self.config['srmfile']):
self.srmfile = self.config['srmfile']
elif os.path.exists(pkgrs.resource_filename('latools',
self.config['srmfile'])):
self.srmfile = pkgrs.resource_filename('latools',
self.config['srmfile'])
else:
config_file = config_locator()
raise ValueError(('The SRM file specified in the ' + self.config['config'] +
' configuration cannot be found.\n'
'Please make sure the file exists, and that the '
'path in the config file is correct.\n'
'Your configurations can be found here:'
' {}\n'.format(config_file)))
def _get_samples(self, subset=None):
"""
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
"""
if subset is None:
samples = self.subsets['All_Samples']
else:
try:
samples = self.subsets[subset]
except KeyError:
raise KeyError(("Subset '{:s}' does not ".format(subset) +
"exist.\nUse 'make_subset' to create a" +
"subset."))
return samples
def _log_header(self):
return ['# LATOOLS analysis log saved at {}'.format(time.strftime('%Y:%m:%d %H:%M:%S')),
'data_path :: {}'.format(self.path),
'# Analysis Log Start: \n'
]
def _analyte_checker(self, analytes=None, check_ratios=True, single=False, focus_stage=None):
"""
Return valid analytes depending on the analysis stage
"""
return analyte_checker(self, analytes=analytes, check_ratios=check_ratios, single=single, focus_stage=focus_stage)
def analytes_sorted(self, analytes=None, check_ratios=True, single=False, focus_stage=None):
return sorted(self._analyte_checker(analytes=analytes, check_ratios=check_ratios, single=single, focus_stage=focus_stage), key=analyte_sort_fn)
@_log
def basic_processing(self,
noise_despiker=True, despike_win=3, despike_nlim=12., # despike args
despike_maxiter=4,
autorange_analyte='total_counts', autorange_gwin=5, autorange_swin=3, autorange_win=20, # autorange args
autorange_on_mult=[1., 1.5], autorange_off_mult=[1.5, 1],
autorange_transform='log',
bkg_weight_fwhm=300., # bkg_calc_weightedmean
bkg_n_min=20, bkg_n_max=None, bkg_cstep=None,
bkg_filter=False, bkg_f_win=7, bkg_f_n_lim=3,
bkg_errtype='stderr', # bkg_sub
calib_drift_correct=True, # calibrate
calib_srms_used=['NIST610', 'NIST612', 'NIST614'],
calib_zero_intercept=True, calib_n_min=10,
plots=True):
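"""
Convenience method that runs the standard processing sequence in order:
despike, autorange, weighted-mean background calculation, background
subtraction, ratio calculation and calibration, with optional plots at
each stage. See the individual methods for the meaning of each parameter.
Examples
--------
An illustrative call, assuming an `analyse` object named `eg`:
>>> eg.basic_processing(calib_srms_used=['NIST610', 'NIST612'])
"""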
self.despike(noise_despiker=noise_despiker,
win=despike_win, nlim=despike_nlim,
maxiter=despike_maxiter)
self.autorange(analyte=autorange_analyte, gwin=autorange_gwin, swin=autorange_swin,
win=autorange_win, on_mult=autorange_on_mult,
off_mult=autorange_off_mult,
transform=autorange_transform)
if plots:
self.trace_plots(ranges=True)
self.bkg_calc_weightedmean(weight_fwhm=bkg_weight_fwhm, n_min=bkg_n_min, n_max=bkg_n_max,
cstep=bkg_cstep, bkg_filter=bkg_filter, f_win=bkg_f_win, f_n_lim=bkg_f_n_lim)
if plots:
self.bkg_plot()
self.bkg_subtract(errtype=bkg_errtype)
self.ratio()
self.calibrate(drift_correct=calib_drift_correct, srms_used=calib_srms_used,
zero_intercept=calib_zero_intercept, n_min=calib_n_min)
if plots:
self.calibration_plot()
return
@_log
def autorange(self, analyte='total_counts', gwin=5, swin=3, win=20,
on_mult=[1., 1.5], off_mult=[1.5, 1],
transform='log', ploterrs=True, focus_stage='despiked', **kwargs):
"""
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
should be abundant and homogeneous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minimum between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this threshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at a point where the gaussian intensity is at `conf` times the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transition regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
This can also be 'total_counts' to use the sum of all analytes.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
smwin : int
The smoothing window used for calculating the second derivative.
Must be odd.
conf : float
The proportional intensity of the fitted gaussian tails that
determines the transition width cutoff (lower = wider transition
regions excluded).
trans_mult : array_like, len=2
Multiples of the peak FWHM to add to the transition cutoffs, e.g.
if the transitions consistently leave some bad data following the
transition, set trans_mult to [0, 0.5] to add 0.5 * the FWHM to the
right hand side of the limit.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked', or 'rawdata' if not despiked. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
Boolean arrays identifying background, signal and transition
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
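Examples
--------
An illustrative call, assuming an `analyse` object named `eg` (the
parameter values shown are just the defaults):
>>> eg.autorange(analyte='total_counts', gwin=5, swin=3, win=20,
...              on_mult=[1.0, 1.5], off_mult=[1.5, 1.0])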
"""
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
if analyte in self.analytes:
self.minimal_analytes.update([analyte])
fails = {} # list for catching failures.
with self.pbar.set(total=len(self.data), desc='AutoRange') as prog:
for s, d in self.data.items():
f = d.autorange(analyte=analyte, gwin=gwin, swin=swin, win=win,
on_mult=on_mult, off_mult=off_mult,
ploterrs=ploterrs, transform=transform, **kwargs)
if f is not None:
fails[s] = f
prog.update() # advance progress bar
# handle failures
if len(fails) > 0:
wstr = ('\n\n' + '*' * 41 + '\n' +
' WARNING\n' + '*' * 41 + '\n' +
'Autorange failed for some samples:\n')
kwidth = max([len(k) for k in fails.keys()]) + 1
fstr = ' {:' + '{}'.format(kwidth) + 's}: '
for k in sorted(fails.keys()):
wstr += fstr.format(k) + ', '.join(['{:.1f}'.format(f) for f in fails[k][-1]]) + '\n'
wstr += ('\n*** THIS IS NOT NECESSARILY A PROBLEM ***\n' +
'But please check the plots below to make\n' +
'sure they look OK. Failures are marked by\n' +
'dashed vertical red lines.\n\n' +
'To examine an autorange failure in more\n' +
'detail, use the `autorange_plot` method\n' +
'of the failing data object, e.g.:\n' +
"dat.data['Sample'].autorange_plot(params)\n" +
'*' * 41 + '\n')
warnings.warn(wstr)
self.stages_complete.update(['autorange'])
return
def find_expcoef(self, nsd_below=0., plot=False,
trimlim=None, autorange_kwargs={}):
"""
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit, if str the plot is to the
location specified in str.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
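Examples
--------
An illustrative call, assuming an `analyse` object named `eg`:
>>> eg.find_expcoef(nsd_below=0., plot=True)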
"""
print('Calculating exponential decay coefficient\nfrom SRM washouts...')
def findtrim(tr, lim=None):
trr = np.roll(tr, -1)
trr[-1] = 0
if lim is None:
lim = 0.5 * np.nanmax(tr - trr)
ind = (tr - trr) >= lim
return np.arange(len(ind))[ind ^ np.roll(ind, -1)][0]
if not hasattr(self.stds[0], 'trnrng'):
for s in self.stds:
s.autorange(**autorange_kwargs, ploterrs=False)
trans = []
times = []
for v in self.stds:
for trnrng in v.trnrng[-1::-2]:
tr = minmax_scale(v.data['total_counts'][(v.Time > trnrng[0]) & (v.Time < trnrng[1])])
sm = np.apply_along_axis(np.nanmean, 1,
rolling_window(tr, 3, pad=0))
sm[0] = sm[1]
trim = findtrim(sm, trimlim) + 2
trans.append(minmax_scale(tr[trim:]))
times.append(np.arange(tr[trim:].size) *
np.diff(v.Time[1:3]))
times = np.concatenate(times)
times = np.round(times, 2)
trans = np.concatenate(trans)
ti = []
tr = []
for t in np.unique(times):
ti.append(t)
tr.append(np.nanmin(trans[times == t]))
def expfit(x, e):
"""
Exponential decay function.
"""
return np.exp(e * x)
ep, ecov = curve_fit(expfit, ti, tr, p0=(-1.))
eeR2 = R2calc(trans, expfit(times, ep))
if plot:
fig, ax = plt.subplots(1, 1, figsize=[6, 4])
ax.scatter(times, trans, alpha=0.2, color='k', marker='x', zorder=-2)
ax.scatter(ti, tr, alpha=1, color='k', marker='o')
fitx = np.linspace(0, max(ti))
ax.plot(fitx, expfit(fitx, ep), color='r', label='Fit')
ax.plot(fitx, expfit(fitx, ep - nsd_below * np.diag(ecov)**.5, ),
color='b', label='Used')
ax.text(0.95, 0.75,
('y = $e^{%.2f \\pm %.2f * x}$\n$R^2$= %.2f \nCoefficient: '
'%.2f') % (ep,
np.diag(ecov)**.5,
eeR2,
ep - nsd_below * np.diag(ecov)**.5),
transform=ax.transAxes, ha='right', va='top', size=12)
ax.set_xlim(0, ax.get_xlim()[-1])
ax.set_xlabel('Time (s)')
ax.set_ylim(-0.05, 1.05)
ax.set_ylabel('Proportion of Signal')
plt.legend()
if isinstance(plot, str):
fig.savefig(plot)
self.expdecay_coef = ep - nsd_below * np.diag(ecov)**.5
print(' {:0.2f}'.format(self.expdecay_coef[0]))
return
@_log
def despike(self, expdecay_despiker=False, exponent=None,
noise_despiker=True, win=3, nlim=12., exponentrace_plot=False,
maxiter=4, autorange_kwargs={}, focus_stage='rawdata'):
"""
Despikes data with exponential decay and noise filters.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expcoef`.
tstep : None or float
The time interval between measurements. If None, it is
determined automatically from the Time variable.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
exponentrace_plot : bool
Whether or not to show a plot of the automatically determined
exponential decay exponent.
maxiter : int
The maximum number of times that the filter is applied.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'rawdata'. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
None
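Examples
--------
An illustrative call, assuming an `analyse` object named `eg` (values
shown are the defaults for the noise despiker):
>>> eg.despike(noise_despiker=True, win=3, nlim=12., maxiter=4)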
"""
if focus_stage != self.focus_stage:
self.set_focus(focus_stage)
if expdecay_despiker and exponent is None:
if not hasattr(self, 'expdecay_coef'):
self.find_expcoef(plot=exponentrace_plot,
autorange_kwargs=autorange_kwargs)
exponent = self.expdecay_coef
time.sleep(0.1)
with self.pbar.set(total=len(self.data), desc='Despiking') as prog:
for d in self.data.values():
d.despike(expdecay_despiker, exponent,
noise_despiker, win, nlim, maxiter)
prog.update()
self.stages_complete.update(['despiked'])
self.focus_stage = 'despiked'
return
# functions for background correction
def get_background(self, n_min=10, n_max=None, focus_stage='despiked', bkg_filter=False, f_win=5, f_n_lim=3):
"""
Extract all background data from all samples on universal time scale.
Used by both the 'interp1d' and 'weightedmean' background calculation methods.
Parameters
----------
n_min : int
The minimum number of points a background region must
have to be included in calculation.
n_max : int
The maximum number of points a background region must
have to be included in calculation.
bkg_filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
None. The compiled background data are stored in the `bkg` attribute,
as `bkg['raw']` and `bkg['summary']`.
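Examples
--------
An illustrative call, assuming an `analyse` object named `eg`; the
compiled background data end up in `eg.bkg['raw']` and `eg.bkg['summary']`:
>>> eg.get_background(n_min=10, bkg_filter=True, f_win=5, f_n_lim=3)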
"""
allbkgs = {'uTime': [],
'ns': []}
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
for a in self.analytes:
allbkgs[a] = []
n0 = 0
for s in self.data.values():
if sum(s.bkg) > 0:
allbkgs['uTime'].append(s.uTime[s.bkg])
allbkgs['ns'].append(enumerate_bool(s.bkg, n0)[s.bkg])
n0 = allbkgs['ns'][-1][-1]
for a in self.analytes:
allbkgs[a].append(s.data[focus_stage][a][s.bkg])
allbkgs.update((k, np.concatenate(v)) for k, v in allbkgs.items())
bkgs = pd.DataFrame(allbkgs) # using pandas here because it's much more efficient than loops.
self.bkg = Bunch()
# extract background data from whole dataset
if n_max is None:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: len(x) > n_min)
else:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: (len(x) > n_min) & (len(x) < n_max))
# calculate per - background region stats
self.bkg['summary'] = self.bkg['raw'].groupby('ns').aggregate([np.mean, np.std, stderr])
# sort summary by uTime
self.bkg['summary'].sort_values(('uTime', 'mean'), inplace=True)
# self.bkg['summary'].index = np.arange(self.bkg['summary'].shape[0])
# self.bkg['summary'].index.name = 'ns'
if bkg_filter:
# calculate rolling mean and std from summary
t = self.bkg['summary'].loc[:, idx[:, 'mean']]
r = t.rolling(f_win).aggregate([np.nanmean, np.nanstd])
# calculate upper threshold
upper = r.loc[:, idx[:, :, 'nanmean']] + f_n_lim * r.loc[:, idx[:, :, 'nanstd']].values
# calculate which are over upper threshold
over = r.loc[:, idx[:, :, 'nanmean']] > np.roll(upper.values, 1, 0)
# identify them
ns_drop = over.loc[over.apply(any, 1), :].index.values
# drop them from summary
self.bkg['summary'].drop(ns_drop, inplace=True)
# remove them from raw
ind = np.ones(self.bkg['raw'].shape[0], dtype=bool)
for ns in ns_drop:
ind = ind & (self.bkg['raw'].loc[:, 'ns'] != ns)
self.bkg['raw'] = self.bkg['raw'].loc[ind, :]
return
@_log
def bkg_calc_weightedmean(self, analytes=None, weight_fwhm=600,
n_min=20, n_max=None, cstep=None, errtype='stderr',
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a gaussian weighted mean.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
weight_fwhm : float
The full-width-at-half-maximum of the gaussian used
to calculate the weighted average.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
bkg_filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
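Examples
--------
An illustrative call, assuming an `analyse` object named `eg` (the
weight_fwhm value is a placeholder, not a recommendation):
>>> eg.bkg_calc_weightedmean(weight_fwhm=300., n_min=20)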
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
# Gaussian - weighted average
if 'calc' not in self.bkg.keys():
# create time points to calculate background
if cstep is None:
cstep = weight_fwhm / 20
elif cstep > weight_fwhm:
warnings.warn("\ncstep should be less than weight_fwhm. Your backgrounds\n" +
"might not behave as expected.\n")
bkg_t = np.linspace(0,
self.max_time,
int(self.max_time // cstep))
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
# TODO : calculation then dict assignment is clumsy...
mean, std, stderr = gauss_weighted_stats(self.bkg['raw'].uTime,
self.bkg['raw'].loc[:, analytes].values,
self.bkg['calc']['uTime'],
fwhm=weight_fwhm)
self.bkg_interps = {}
for i, a in enumerate(analytes):
self.bkg['calc'][a] = {'mean': mean[i],
'std': std[i],
'stderr': stderr[i]}
self.bkg_interps[a] = un_interp1d(x=self.bkg['calc']['uTime'],
y=un.uarray(self.bkg['calc'][a]['mean'],
self.bkg['calc'][a][errtype]))
@_log
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None, cstep=30,
bkg_filter=False, f_win=7, f_n_lim=3, errtype='stderr', focus_stage='despiked'):
"""
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
bkg_filter : bool
If true, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. If True, two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
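Examples
--------
An illustrative call, assuming an `analyse` object named `eg`:
>>> eg.bkg_calc_interp1d(kind=1, n_min=10, cstep=30)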
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
def pad(a, lo=None, hi=None):
if lo is None:
lo = [a[0]]
if hi is None:
hi = [a[-1]]
return np.concatenate((lo, a, hi))
if 'calc' not in self.bkg.keys():
# create time points to calculate background
bkg_t = pad(np.ravel(self.bkg.raw.loc[:, ['uTime', 'ns']].groupby('ns').aggregate([min, max])))
bkg_t = np.unique(np.sort(np.concatenate([bkg_t, np.arange(0, self.max_time, cstep)])))
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
d = self.bkg['summary']
self.bkg_interps = {}
with self.pbar.set(total=len(analytes), desc='Calculating Analyte Backgrounds') as prog:
for a in analytes:
fill_vals = (un.uarray(d.loc[:, (a, 'mean')].iloc[0], d.loc[:, (a, errtype)].iloc[0]),
un.uarray(d.loc[:, (a, 'mean')].iloc[-1], d.loc[:, (a, errtype)].iloc[-1]))
p = un_interp1d(x=d.loc[:, ('uTime', 'mean')],
y=un.uarray(d.loc[:, (a, 'mean')],
d.loc[:, (a, errtype)]),
kind=kind, bounds_error=False, fill_value=fill_vals)
self.bkg_interps[a] = p
self.bkg['calc'][a] = {'mean': p.new_nom(self.bkg['calc']['uTime']),
errtype: p.new_std(self.bkg['calc']['uTime'])}
prog.update()
# self.bkg['calc']
return
@_log
def bkg_subtract(self, analytes=None, errtype='stderr', focus_stage='despiked'):
"""
Subtract calculated background from data.
Must run bkg_calc first!
Parameters
----------
analytes : str or iterable
Which analyte(s) to subtract.
errtype : str
Which type of error to propagate. Default is 'stderr'.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
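Examples
--------
An illustrative sequence, assuming an `analyse` object named `eg`; one
of the bkg_calc methods must be run first:
>>> eg.bkg_calc_weightedmean(weight_fwhm=300.)
>>> eg.bkg_subtract(errtype='stderr')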
"""
analytes = self._analyte_checker(analytes)
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
# make uncertainty-aware background interpolators
# bkg_interps = {}
# for a in analytes:
# bkg_interps[a] = un_interp1d(x=self.bkg['calc']['uTime'],
# y=un.uarray(self.bkg['calc'][a]['mean'],
# self.bkg['calc'][a][errtype]))
# self.bkg_interps = bkg_interps
# apply background corrections
with self.pbar.set(total=len(self.data), desc='Background Subtraction') as prog:
for d in self.data.values():
# [d.bkg_subtract(a, bkg_interps[a].new(d.uTime), None, focus_stage=focus_stage) for a in analytes]
[d.bkg_subtract(a, self.bkg_interps[a].new(d.uTime), ~d.sig, focus_stage=focus_stage) for a in analytes]
d.setfocus('bkgsub')
prog.update()
self.stages_complete.update(['bkgsub'])
self.focus_stage = 'bkgsub'
return
@_log
def correct_spectral_interference(self, target_analyte, source_analyte, f):
"""
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analyte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub'). To undo a correction,
rerun `self.bkg_subtract()`.
Example
-------
To correct 44Ca+ for an 88Sr++ interference, where both 43.5 and 44 Da
peaks are known:
f = abundance(88Sr) / abundance(87Sr)
counts(44Ca) = counts(44 Da) - counts(43.5 Da) * f
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
"""
if target_analyte not in self.analytes:
raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
if source_analyte not in self.analytes:
raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))
with self.pbar.set(total=len(self.data), desc='Interference Correction') as prog:
for d in self.data.values():
d.correct_spectral_interference(target_analyte, source_analyte, f)
prog.update()
@_log
def bkg_plot(self, analytes=None, figsize=None, yscale='log',
ylim=None, err='stderr', save=True):
"""
Plot the calculated background.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot.
figsize : tuple
The (width, height) of the figure, in inches.
If None, calculated based on number of samples.
yscale : str
'log' (default) or 'linear'.
ylim : tuple
Manually specify the y scale.
err : str
What type of error to plot. Default is stderr.
save : bool
If True, figure is saved.
Returns
-------
fig, ax : matplotlib.figure, matplotlib.axes
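Examples
--------
An illustrative call, assuming an `analyse` object named `eg` with
calculated backgrounds:
>>> fig, ax = eg.bkg_plot(yscale='log', save=True)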
"""
# if not hasattr(self, 'bkg'):
# raise ValueError("\nPlease calculate a background before attempting to\n" +
# "plot it... either:\n" +
# " bkg_calc_interp1d\n" +
# " bkg_calc_weightedmean\n")
if not hasattr(self, 'bkg'):
self.get_background()
analytes = self._analyte_checker(analytes)
if figsize is None:
if len(self.samples) > 50:
figsize = (len(self.samples) * 0.2, 5)
else:
figsize = (10, 5)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.07, .1, .84, .8])
with self.pbar.set(total=len(analytes), desc='Plotting backgrounds') as prog:
for a in analytes:
# draw data points
ax.scatter(self.bkg['raw'].uTime, self.bkg['raw'].loc[:, a],
alpha=0.5, s=3, c=self.cmaps[a],
lw=0.5)
# draw STD boxes
for i, r in self.bkg['summary'].iterrows():
x = (r.loc['uTime', 'mean'] - r.loc['uTime', 'std'] * 2,
r.loc['uTime', 'mean'] + r.loc['uTime', 'std'] * 2)
yl = [r.loc[a, 'mean'] - r.loc[a, err]] * 2
yu = [r.loc[a, 'mean'] + r.loc[a, err]] * 2
ax.fill_between(x, yl, yu, alpha=0.8, lw=0.5, color=self.cmaps[a], zorder=1)
prog.update()
if yscale == 'log':
ax.set_yscale('log')
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim(ax.get_ylim() * np.array([1, 10])) # x10 to make sample names readable.
if 'calc' in self.bkg:
for a in analytes:
# draw confidence intervals of calculated
x = self.bkg['calc']['uTime']
y = self.bkg['calc'][a]['mean']
yl = self.bkg['calc'][a]['mean'] - self.bkg['calc'][a][err]
yu = self.bkg['calc'][a]['mean'] + self.bkg['calc'][a][err]
# trim values below zero if log scale
if yscale == 'log':
yl[yl < ax.get_ylim()[0]] = ax.get_ylim()[0]
ax.plot(x, y,
c=self.cmaps[a], zorder=2, label=pretty_element(a))
ax.fill_between(x, yl, yu,
color=self.cmaps[a], alpha=0.3, zorder=-1)
else:
for a in analytes:
ax.plot([], [], c=self.cmaps[a], label=pretty_element(a))
ax.set_xlabel('Time (s)')
ax.set_ylabel('Background Counts')
ax.set_title('Points = raw data; Bars = {:s}; Lines = Calculated Background; Envelope = Background {:s}'.format(err, err),
fontsize=10)
ha, la = ax.get_legend_handles_labels()
ax.legend(labels=la[:len(analytes)], handles=ha[:len(analytes)], bbox_to_anchor=(1, 1))
# scale x axis to range ± 2.5%
xlim = [0, max([d.uTime[-1] for d in self.data.values()])]
ax.set_xlim(xlim)
# add sample labels
for s, d in self.data.items():
ax.axvline(d.uTime[0], alpha=0.2, color='k', zorder=-1)
ax.text(d.uTime[0], ax.get_ylim()[1], s, rotation=90,
va='top', ha='left', zorder=-1, fontsize=7)
if save:
fig.savefig(self.report_dir + '/background.png', dpi=200)
return fig, ax
# functions for calculating ratios
@_log
def ratio(self, internal_standard=None, analytes=None, focus_stage=None):
"""
Calculates the ratio of all analytes to a single analyte.
Parameters
----------
internal_standard : str
The name of the analyte to divide all other analytes
by.
Returns
-------
None
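Examples
--------
An illustrative call, assuming an `analyse` object named `eg` in which
'Ca43' was measured (the analyte name is a placeholder):
>>> eg.ratio(internal_standard='Ca43')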
"""
if focus_stage is None:
focus_stage = self.focus_stage
if 'bkgsub' not in self.stages_complete:
raise RuntimeError('Cannot calculate ratios before background subtraction.')
analytes = self._analyte_checker(analytes, focus_stage=focus_stage)
if internal_standard is not None:
self.internal_standard = internal_standard
if self.internal_standard in self.analytes.union(self.analyte_ratios):
self.minimal_analytes.update([internal_standard])
self.calibration_analytes.update([internal_standard])
self.calibration_analytes.update(analytes)
else:
raise ValueError('The internal standard ({}) is not amongst the '.format(internal_standard) +
'analytes in\nyour data files. Please make sure it is specified correctly.')
# check internal_standard is valid
internal_standard = self._analyte_checker(self.internal_standard, focus_stage=focus_stage).pop()
with self.pbar.set(total=len(self.data), desc='Ratio Calculation') as prog:
for s in self.data.values():
s.ratio(internal_standard=internal_standard, analytes=analytes, focus_stage=focus_stage)
self.analyte_ratios.update(s.analyte_ratios)
self.cmaps.update(s.cmap)
prog.update()
if self.focus_stage not in ['ratios', 'calibrated', 'mass_fraction']:
self.stages_complete.update(['ratios'])
self.focus_stage = 'ratios'
return
def srm_load_database(self, srms_used=None, reload=False):
if not hasattr(self, 'srmdat') or reload:
# load SRM info
srmdat = srms.read_table(self.srmfile)
srmdat = srmdat.loc[srms_used]
srmdat.reset_index(inplace=True)
srmdat.set_index(['SRM', 'Item'], inplace=True)
# calculate ratios to internal_standard for calibration ratios
analyte_srm_link = {}
warns = {}
self.uncalibrated = set()
self._analytes_missing_from_srm = set()
# create an empty SRM table
srmtab = pd.DataFrame(index=srms_used, columns=pd.MultiIndex.from_product([self.analyte_ratios, ['mean', 'err']]))
for srm in srms_used:
srm_nocal = set()
srmsub = srmdat.loc[srm]
# determine analyte - Item pairs in table
ad = {}
for ar in self.analyte_ratios:
a_num, a_denom = ar.split('_') # separate numerator and denominator
for a in [a_num, a_denom]:
if a in ad.keys():
continue
# check if there's an exact match of form [Mass][Element] in srmdat
mna = analyte_2_massname(a)
if mna in srmsub.index:
ad[a] = mna
else:
# if not, match by element name.
item = srmsub.index[srmsub.index.str.contains(get_analyte_name(a))].values
if len(item) > 1:
item = item[item == get_analyte_name(a)]
if len(item) == 1:
ad[a] = item[0]
else:
if srm not in warns:
warns[srm] = []
warns[srm].append(a)
srm_nocal.update([ar])
analyte_srm_link[srm] = ad
# build calibration database for given ratios
for a in self.analyte_ratios.difference(srm_nocal):
a_num, a_denom = a.split('_')
# calculate SRM polyatom multiplier (multiplier to account for stoichiometry,
# e.g. if internal standard is Na, N will be 2 if measured in SRM as Na2O)
N_denom = float(decompose_molecule(ad[a_denom])[get_analyte_name(a_denom)])
N_num = float(decompose_molecule(ad[a_num])[get_analyte_name(a_num)])
# calculate molar ratio
srmtab.loc[srm, (a, 'mean')] = ((srmdat.loc[(srm, ad[a_num]), 'mol/g'] * N_num) /
(srmdat.loc[(srm, ad[a_denom]), 'mol/g'] * N_denom))
srmtab.loc[srm, (a, 'err')] = (srmtab.loc[srm, (a, 'mean')] *
((srmdat.loc[(srm, ad[a_num]), 'mol/g_err'] / (srmdat.loc[(srm, ad[a_num]), 'mol/g']))**2 +
(srmdat.loc[(srm, ad[a_denom]), 'mol/g_err'] / (srmdat.loc[(srm, ad[a_denom]), 'mol/g']))**2)**0.5)
# where uncertainties are missing, replace with zeros
srmtab[srmtab.loc[:, idx[:, 'err']].isnull()] = 0
# record outputs
self.srmdat = srmdat # the full SRM table
self._analyte_srmdat_link = analyte_srm_link # dict linking analyte names to rows in srmdat
self.srmtab = srmtab.astype(float) # a summary of relevant mol/mol values only
# record which analytes have missing CRM data
means = self.srmtab.loc[:, idx[:, 'mean']]
means.columns = means.columns.droplevel(1)
self._analytes_missing_srm = means.columns.values[means.isnull().any()] # analyte ratios missing from SRM table
self._srm_id_analyte_ratios = means.columns.values[~means.isnull().any()] # analyte ratios identified
# self._calib_analyte_ratios = means.columns.values[~means.isnull().all()]
self.uncalibrated.intersection_update(srm_nocal)
self._analytes_missing_from_srm.update(srm_nocal)
# Print any warnings
if len(warns) > 0:
print('WARNING: Some analytes are not present in the SRM database for some standards:')
for srm, a in warns.items():
print(f' {srm}: ' + ', '.join(self.analytes_sorted(a, focus_stage='bkgsub')))
if len(self.uncalibrated) > 0:
self.analyte_ratios.difference_update(self.uncalibrated)
print('WARNING: Some analytes are not present in the SRM database for ANY standards:')
print(f'{self.uncalibrated} have been removed from further analysis.')
def srm_compile_measured(self, n_min=10, focus_stage='ratios'):
"""
Compile mean and standard errors of measured SRMs
Parameters
----------
n_min : int
The minimum number of points to consider as a valid measurement.
Default = 10.
"""
warns = []
# compile mean and standard errors of samples
for s in self.stds:
s_stdtab = pd.DataFrame(columns=pd.MultiIndex.from_product([s.analyte_ratios, ['err', 'mean']]))
s_stdtab.index.name = 'uTime'
if not s.n > 0:
s.stdtab = s_stdtab
continue
for n in range(1, s.n + 1):
ind = s.ns == n
if sum(ind) >= n_min:
for a in s.analyte_ratios:
aind = ind & ~np.isnan(nominal_values(s.data[focus_stage][a]))
s_stdtab.loc[np.nanmean(s.uTime[s.ns == n]),
(a, 'mean')] = np.nanmean(nominal_values(s.data[focus_stage][a][aind]))
s_stdtab.loc[np.nanmean(s.uTime[s.ns == n]),
(a, 'err')] = np.nanstd(nominal_values(s.data[focus_stage][a][aind])) / np.sqrt(sum(aind))
else:
warns.append(' Ablation {:} of SRM measurement {:} ({:} points)'.format(n, s.sample, sum(ind)))
# sort column multiindex
s_stdtab = s_stdtab.loc[:, s_stdtab.columns.sort_values()]
# sort row index
s_stdtab.sort_index(inplace=True)
# create 'SRM' column for naming SRM
s_stdtab.loc[:, 'STD'] = s.sample
s.stdtab = s_stdtab
if len(warns) > 0:
print('WARNING: Some SRM ablations have been excluded because they do not contain enough data:')
print('\n'.join(warns))
print("To *include* these ablations, reduce the value of n_min (currently {:})".format(n_min))
# compile them into a table
stdtab = pd.concat([s.stdtab for s in self.stds]).apply(pd.to_numeric, 1, errors='ignore')
stdtab = stdtab.reindex(self.analytes_sorted(self.analyte_ratios, focus_stage=focus_stage) + ['STD'], level=0, axis=1)
# identify groups of consecutive SRMs
ts = stdtab.index.values
start_times = [s.uTime[0] for s in self.data.values()]
lastpos = sum(ts[0] > start_times)
group = [1]
for t in ts[1:]:
pos = sum(t > start_times)
rpos = pos - lastpos
if rpos <= 1:
group.append(group[-1])
else:
group.append(group[-1] + 1)
lastpos = pos
stdtab.loc[:, 'group'] = group
# calculate centre time for the groups
stdtab.loc[:, 'gTime'] = np.nan
for g, d in stdtab.groupby('group'):
ind = stdtab.group == g
stdtab.loc[ind, 'gTime'] = stdtab.loc[ind].index.values.mean()
self.stdtab = stdtab
def srm_id_auto(self, srms_used=['NIST610', 'NIST612', 'NIST614'], analytes=None, n_min=10, reload_srm_database=False):
"""
Function for automatically identifying SRMs using KMeans clustering.
KMeans is performed on the log of SRM composition, which aids separation
of relatively similar SRMs within a large compositional range.
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
analytes : array_like
Which analyte ratios to base the identification on. If None,
all analyte ratios are used (default).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
reload_srm_database : bool
Whether or not to re-load the SRM database before running the function.
"""
# TODO: srm_id_plot!
if isinstance(srms_used, str):
srms_used = [srms_used]
# reload SRM database (if reload_srm_database=True)
self.srm_load_database(srms_used, reload_srm_database)
analytes = self._analyte_checker(analytes)
analytes.difference_update(self._analytes_missing_srm)
analytes = list(analytes)
# get and scale mean srm values for all analytes
srmid = self.srmtab.loc[:, idx[analytes, 'mean']]
_srmid = scale(np.log(srmid))
srm_labels = srmid.index.values
# get and scale measured srm values for all analytes
stdid = self.stdtab.loc[:, idx[analytes, 'mean']]
_stdid = scale(np.log(stdid))
_stdid[np.isnan(_stdid)] = -12
# fit KMeans classifier to srm database
classifier = KMeans(len(srms_used)).fit(_srmid)
# apply classifier to measured data
std_classes = classifier.predict(_stdid)
# get srm names from classes
std_srm_labels = np.array([srm_labels[np.argwhere(classifier.labels_ == i)][0][0] for i in std_classes])
self.stdtab.loc[:, 'SRM'] = std_srm_labels
self._srm_key_dict = {k: v for k, v in zip(self.stdtab.STD, self.stdtab.SRM)}
self.srms_ided = True
self.srm_build_calib_table()
def srm_build_calib_table(self):
"""
Combine SRM database values and identified measured values into a calibration database.
"""
caltab = self.stdtab.reset_index()
caltab.set_index(['gTime', 'uTime'], inplace=True)
levels = ['meas_' + c if c != '' else c for c in caltab.columns.levels[1]]
caltab.columns.set_levels(levels, 1, inplace=True)
for a in self.analyte_ratios:
caltab.loc[:, (a, 'srm_mean')] = self.srmtab.loc[caltab.SRM, (a, 'mean')].values
caltab.loc[:, (a, 'srm_err')] = self.srmtab.loc[caltab.SRM, (a, 'err')].values
self.caltab = caltab.reindex(self.stdtab.columns.levels[0], axis=1, level=0)
def clear_calibration(self):
if self.srms_ided:
del self.stdtab
del self.srmdat
del self.srmtab
self.srms_ided = False
if 'calibrated' in self.stages_complete:
del self.calib_params
del self.calib_ps
self.stages_complete.remove('calibrated')
self.focus_stage = 'ratios'
self.set_focus('ratios')
# apply calibration to data
@_log
def calibrate(self, analytes=None, drift_correct=True,
srms_used=['NIST610', 'NIST612', 'NIST614'],
zero_intercept=True, n_min=10, reload_srm_database=False):
"""
Calibrates the data to measured SRM values.
If `zero_intercept` is True (the default), the calibration line is forced through the origin.
Parameters
----------
analytes : str or iterable
Which analytes you'd like to calibrate. Defaults to all.
drift_correct : bool
Whether to pool all SRM measurements into a single calibration,
or vary the calibration through the run, interpolating
coefficients between measured SRMs.
srms_used : str or iterable
Which SRMs have been measured. Must match names given in
SRM data file *exactly*.
n_min : int
The minimum number of data points an SRM measurement
must have to be included.
Returns
-------
None
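Examples
--------
An illustrative call, assuming an `analyse` object named `eg`; the SRM
names must match entries in the SRM database exactly:
>>> eg.calibrate(drift_correct=True, zero_intercept=True,
...              srms_used=['NIST610', 'NIST612', 'NIST614'])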
"""
# load SRM database
self.srm_load_database(srms_used, reload_srm_database)
# compile measured SRM data
self.srm_compile_measured(n_min)
analytes = self._analyte_checker(analytes)
if isinstance(srms_used, str):
srms_used = [srms_used]
if not hasattr(self, 'srmtabs'):
self.srm_id_auto(srms_used=srms_used, n_min=n_min, reload_srm_database=reload_srm_database)
# make container for calibration params
gTime = np.asanyarray(self.caltab.index.levels[0])
if not hasattr(self, 'calib_params'):
self.calib_params = pd.DataFrame(columns=pd.MultiIndex.from_product([analytes, ['m']]),
index=gTime)
if zero_intercept:
fn = lambda x, m: x * m
else:
fn = lambda x, m, c: x * m + c
for a in analytes:
if zero_intercept:
if (a, 'c') in self.calib_params:
self.calib_params.drop((a, 'c'), 1, inplace=True)
else:
self.calib_params.loc[:, (a, 'c')] = 0
self.calib_params.loc[:, (a, 'c')] = self.calib_params[(a, 'c')].astype(object, copy=False) # set new column to object type
if drift_correct:
for g in gTime:
if self.caltab.loc[g].size == 0:
continue
meas = self.caltab.loc[g, (a, 'meas_mean')].values
srm = self.caltab.loc[g, (a, 'srm_mean')].values
viable = ~np.isnan(meas + srm) # remove any nan values
meas = meas[viable]
srm = srm[viable]
meas_err = self.caltab.loc[g, (a, 'meas_err')].values[viable]
srm_err = self.caltab.loc[g, (a, 'srm_err')].values[viable]
# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation?
sigma = np.sqrt(meas_err**2 + srm_err**2)
if len(meas) > 1:
# multiple SRMs - do a regression
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[g, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = pe[1]
else:
# deal with case where there's only one datum
self.calib_params.loc[g, (a, 'm')] = (un.uarray(srm, srm_err) /
un.uarray(meas, meas_err))[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = 0
else:
meas = self.caltab.loc[:, (a, 'meas_mean')].values
srm = self.caltab.loc[:, (a, 'srm_mean')].values
viable = ~np.isnan(meas + srm) # remove any nan values
meas = meas[viable]
srm = srm[viable]
meas_err = self.caltab.loc[:, (a, 'meas_err')].values[viable]
srm_err = self.caltab.loc[:, (a, 'srm_err')].values[viable]
# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation?
sigma = np.sqrt(meas_err**2 + srm_err**2)
if sum(viable) > 1:
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[:, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = pe[1]
else:
self.calib_params.loc[:, (a, 'm')] = (un.uarray(srm, srm_err) /
un.uarray(meas, meas_err))[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = 0
if self.calib_params.index.min() == 0:
self.calib_params.drop(0, inplace=True)
self.calib_params.drop(self.calib_params.index.max(), inplace=True)
self.calib_params.loc[0, :] = self.calib_params.loc[self.calib_params.index.min(), :]
maxuT = np.max([d.uTime.max() for d in self.data.values()]) # calculate max uTime
self.calib_params.loc[maxuT, :] = self.calib_params.loc[self.calib_params.index.max(), :]
# sort indices for slice access
self.calib_params.sort_index(1, inplace=True)
self.calib_params.sort_index(0, inplace=True)
# calculate interpolators for applying calibrations
self.calib_ps = Bunch()
for a in analytes:
# TODO: revisit un_interp1d to see whether it plays well with correlated values.
# Possible re-write to deal with covariance matrices?
self.calib_ps[a] = {'m': un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'm')].values)}
if not zero_intercept:
self.calib_ps[a]['c'] = un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'c')].values)
with self.pbar.set(total=len(self.data), desc='Applying Calibrations') as prog:
for d in self.data.values():
d.calibrate(self.calib_ps, analytes)
d.uncalibrated = self.uncalibrated
prog.update()
# record SRMs used for plotting
markers = 'osDsv<>PX' # for future implementation of SRM-specific markers.
if not hasattr(self, 'srms_used'):
self.srms_used = set(srms_used)
else:
self.srms_used.update(srms_used)
self.srm_mdict = {k: markers[i] for i, k in enumerate(self.srms_used)}
self.stages_complete.update(['calibrated'])
self.focus_stage = 'calibrated'
return
# data filtering
# TODO: Re-factor filtering to use 'classifier' objects?
# functions for calculating mass fraction (ppm)
def get_sample_list(self, save_as=None, overwrite=False):
"""
Save a csv list of all samples to be populated with internal standard concentrations.
Parameters
----------
save_as : str
Location to save the file. Defaults to the export directory.
"""
if save_as is None:
save_as = self._file_internal_standard_massfrac
else:
self._file_internal_standard_massfrac = save_as
if os.path.exists(save_as):
if not overwrite:
raise IOError(f'File {save_as} exists. Please change the save location or specify overwrite=True')
empty = pd.DataFrame(index=self.samples, columns=['int_stand_massfrac'])
empty.to_csv(save_as)
print(self._wrap_text(f'Sample List saved to {save_as} \nPlease modify and re-import using read_internal_standard_concs()'))
def read_internal_standard_concs(self, sample_conc_file=None):
"""
Load in a per-sample list of internal sample concentrations.
Parameters
----------
sample_conc_file : str
Path to csv file containing internal standard mass fractions.
Must contain the sample names in the first column, column names
in the first row, and contain a column called 'int_stand_massfrac'.
If in doubt, use the `get_sample_list` function to generate a
blank template for your samples.
"""
if sample_conc_file is None:
sample_conc_file = self._file_internal_standard_massfrac
else:
self._file_internal_standard_massfrac = sample_conc_file
self.internal_standard_concs = pd.read_csv(sample_conc_file, index_col=0)
return self.internal_standard_concs
@_log
def calculate_mass_fraction(self, internal_standard_concs=None, analytes=None, analyte_masses=None):
"""
Convert calibrated molar ratios to mass fraction.
Parameters
----------
internal_standard_concs : float or str
The concentration of the internal standard in your samples.
If a string, should be the file name pointing towards the
[completed] output of get_sample_list().
analytes : str or array_like
The analytes you want to calculate.
analyte_masses : dict
A dict containing the masses to use for each analyte.
If None and the analyte names contain a number, that number
is used as the mass. If None and the analyte names do *not*
contain a number, the average mass for the element is used.
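Examples
--------
An illustrative call, assuming an `analyse` object named `eg`; the
internal standard concentration given here is a placeholder value:
>>> eg.calculate_mass_fraction(internal_standard_concs=0.4)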
"""
analytes = self._analyte_checker(analytes, focus_stage='calibrated')
if analyte_masses is None:
analyte_masses = analyte_mass(self.analytes, False)
if isinstance(internal_standard_concs, str):
self.internal_standard_concs = self.read_internal_standard_concs(sample_conc_file=internal_standard_concs)
elif isinstance(internal_standard_concs, float):
self.internal_standard_concs = internal_standard_concs
elif not isinstance(self.internal_standard_concs, pd.DataFrame):
self.internal_standard_concs = self.read_internal_standard_concs()
isc = self.internal_standard_concs
if not isinstance(isc, pd.core.frame.DataFrame):
with self.pbar.set(total=len(self.data), desc='Calculating Mass Fractions') as prog:
for d in self.data.values():
d.calc_mass_fraction(isc, analytes, analyte_masses)
prog.update()
else:
with self.pbar.set(total=len(self.data), desc='Calculating Mass Fractions') as prog:
for k, d in self.data.items():
if k in isc.index:
d.calc_mass_fraction(isc.loc[k, 'int_stand_massfrac'], analytes, analyte_masses)
else:
d.calc_mass_fraction(np.nan, analytes, analyte_masses)
prog.update()
self.stages_complete.update(['mass_fraction'])
self.focus_stage = 'mass_fraction'
@_log
def clear_subsets(self):
"""
Clears all subsets
"""
self._has_subsets = False
self._subset_names = []
self.subsets = Bunch()
self.subsets['All_Analyses'] = self.samples
self.subsets[self.srm_identifier] = [s for s in self.samples if self.srm_identifier in s]
self.subsets['All_Samples'] = [s for s in self.samples if self.srm_identifier not in s]
self.subsets['not_in_set'] = self.subsets['All_Samples'].copy()
@_log
def make_subset(self, samples=None, name=None, force=False, silent=False):
"""
Creates a subset of samples, which can be treated independently.
Parameters
----------
samples : str or array_like
Name of sample, or list of sample names.
name : (optional) str or number
The name of the sample group. Defaults to n + 1, where n is
the highest existing group number
force : bool
If there is an existing subset that contains the same samples,
a new set is not created unless `force=True`. Default is False.
"""
if isinstance(samples, str):
samples = [samples]
# Check if a subset containing the same samples already exists.
already_present = False
existing_name = ''
for k, v in self.subsets.items():
if set(v) == set(samples) and k != 'not_in_set':
already_present = True
existing_name = k
if already_present:
if not silent:
print('***NOPE***')
print(self._wrap_text(
f"A subset containing those samples already exists, and is called '{existing_name}'. A new subset has not been created. I suggest you use the existing one. If you'd like to go ahead anyway, set `force=True` to make a new subset with your provided name."
))
if not force:
return
not_exists = [s for s in samples if s not in self.subsets['All_Analyses']]
if len(not_exists) > 0:
raise ValueError(', '.join(not_exists) + ' not in the list of sample names.\nPlease check your sample names.\nNote: Sample names are stored in the .samples attribute of your analysis.')
if name is None:
name = max([-1] + [x for x in self.subsets.keys() if isinstance(x, int)]) + 1
self._subset_names.append(name)
if samples is not None:
self.subsets[name] = samples
for s in samples:
try:
self.subsets['not_in_set'].remove(s)
except ValueError:
pass
self._has_subsets = True
# for subset in np.unique(list(self.subsets.values())):
# self.subsets[subset] = sorted([k for k, v in self.subsets.items() if str(v) == subset])
if not silent:
print(f'Subset created called {name}.')
return name
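# Hedged usage sketch (sample names and analyte are illustrative): the given name
# (also returned) can then be passed as `subset=` to the filter methods below.
#   dat.make_subset(samples=['Sample-1', 'Sample-2'], name='pilot_runs')
#   dat.filter_threshold('Ba138', 1e-6, subset='pilot_runs')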
@_log
def zeroscreen(self, focus_stage=None):
"""
Remove all points containing data below zero (which are impossible!)
"""
if focus_stage is None:
focus_stage = self.focus_stage
for s in self.data.values():
ind = np.ones(len(s.Time), dtype=bool)
for v in s.data[focus_stage].values():
ind = ind & (nominal_values(v) > 0)
for k in s.data[focus_stage].keys():
s.data[focus_stage][k][~ind] = unc.ufloat(np.nan, np.nan)
self.set_focus(focus_stage)
return
@_log
def filter_threshold(self, analyte, threshold,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
threshold : float
The threshold value.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
with self.pbar.set(total=len(samples), desc='Threshold Filter') as prog:
for s in samples:
self.data[s].filter_threshold(analyte, threshold)
prog.update()
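# Hedged usage sketch (analyte and threshold values are illustrative):
#   dat.filter_threshold(analyte='Al27', threshold=100e-6)
#   dat.filter_status()   # inspect the generated above/below filters before switching them on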
@_log
def filter_threshold_percentile(self, analyte, percentiles, level='population', filt=False,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
percentiles : float or iterable of len=2
The percentile values.
level : str
Whether to calculate percentiles from the entire dataset
('population') or for each individual sample ('individual')
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# Get all samples
self.get_focus(filt=filt, subset=subset, nominal=True)
dat = self.focus[analyte][~np.isnan(self.focus[analyte])]
# calculate filter limits
lims = np.percentile(dat, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = d.focus[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-pcnt_below'.format(percentiles[0]),
below,
'Values below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-pcnt_above'.format(percentiles[0]),
above,
'Values above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-pcnt_inside',
inside,
'Values between ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-pcnt_outside',
outside,
'Values outside ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
prog.update()
return
@_log
def filter_gradient_threshold(self, analyte, threshold, win=15,
recalc=True, win_mode='mid', win_exclude_outside=True, absolute_gradient=True,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
threshold : float
The threshold value.
recalc : bool
Whether or not to re-calculate the gradients.
win_mode : str
Whether the rolling window should be positioned to the left, middle or right
of the returned value. Can be 'left', 'mid' or 'right'.
win_exclude_outside : bool
If True, regions at the start and end where the gradient cannot be calculated
(depending on win_mode setting) will be excluded by the filter.
absolute_gradient : bool
If True, the filter is applied to the absolute gradient (i.e. always positive),
allowing the selection of 'flat' vs 'steep' regions regardless of slope direction.
If False, the sign of the gradient matters, allowing the selection of positive or
negative slopes only.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
with self.pbar.set(total=len(samples), desc='Gradient Threshold Filter') as prog:
for s in samples:
self.data[s].filter_gradient_threshold(analyte=analyte, win=win, threshold=threshold, recalc=recalc,
win_mode=win_mode, win_exclude_outside=win_exclude_outside,
absolute_gradient=absolute_gradient)
prog.update()
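# Hedged usage sketch (all values are illustrative):
#   dat.filter_gradient_threshold(analyte='Al27', threshold=0.01, win=25,
#                                 win_mode='mid', absolute_gradient=True)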
@_log
def filter_gradient_threshold_percentile(self, analyte, percentiles, level='population', win=15, filt=False,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
# Calculate gradients of all samples
self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)
grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# calculate filter limits
lims = np.percentile(grad, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-grd-pcnt_below'.format(percentiles[0]),
below,
'Gradients below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-grd-pcnt_above'.format(percentiles[0]),
above,
'Gradients above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_inside',
inside,
'Gradients between ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_outside',
outside,
'Gradients outside ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
prog.update()
return
@_log
def filter_clustering(self, analytes, filt=False, normalise=True,
method='kmeans', include_time=False, samples=None,
sort=True, subset=None, level='sample', min_data=10, **kwargs):
"""
Applies an n - dimensional clustering filter to the data.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use:
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
level : str
Whether to conduct the clustering analysis at the 'sample' or
'population' level.
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
sort : bool
Whether or not you want the cluster labels to
be sorted by the mean magnitude of the signals
they are based on (0 = lowest)
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analytes = self.analytes_sorted(analytes)
self.minimal_analytes.update(analytes)
if level == 'sample':
with self.pbar.set(total=len(samples), desc='Clustering Filter') as prog:
for s in samples:
self.data[s].filter_clustering(analytes=analytes, filt=filt,
normalise=normalise,
method=method,
include_time=include_time,
min_data=min_data,
sort=sort,
**kwargs)
prog.update()
if level == 'population':
if isinstance(sort, bool):
sort_by = 0
else:
sort_by = sort
name = '_'.join(analytes) + '_{}'.format(method)
self.fit_classifier(name=name, analytes=analytes, method=method,
subset=subset, filt=filt, sort_by=sort_by, **kwargs)
self.apply_classifier(name=name, subset=subset)
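# Hedged usage sketch (analytes and cluster count are illustrative; n_clusters is
# forwarded to the clustering algorithm via **kwargs):
#   dat.filter_clustering(analytes=['Al27', 'Mn55'], method='kmeans',
#                         n_clusters=2, level='sample')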
@_log
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
"""
# isolate data
if samples is not None:
subset = self.make_subset(samples, silent=True)
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
self.minimal_analytes.update(analytes)
self.get_focus(subset=subset, filt=filt)
# create classifier
c = classifier(analytes,
sort_by)
# fit classifier
c.fit(data=self.focus,
method=method,
**kwargs)
self.classifiers[name] = c
return name
@_log
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name
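# Hedged usage sketch (classifier name and analytes are illustrative):
#   dat.fit_classifier(name='Al_Mn_km', analytes=['Al27', 'Mn55'],
#                      method='kmeans', n_clusters=2)
#   dat.apply_classifier(name='Al_Mn_km')   # adds one filter per cluster label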
@_log
def filter_correlation(self, x_analyte, y_analyte, window=None,
r_threshold=0.9, p_threshold=0.05, filt=True,
samples=None, subset=None):
"""
Applies a correlation filter to the data.
Calculates a rolling correlation between every `window` points of
two analytes, and excludes data where their Pearson's R value is
above `r_threshold` and statistically significant.
Data will be excluded where their absolute R value is greater than
`r_threshold` AND the p - value associated with the correlation is
less than `p_threshold`. i.e. only correlations that are statistically
significant are considered.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
Note: the absolute pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
The significance level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
x_analyte = self._analyte_checker(x_analyte, single=True)
y_analyte = self._analyte_checker(y_analyte, single=True)
self.minimal_analytes.update([x_analyte, y_analyte])
with self.pbar.set(total=len(samples), desc='Correlation Filter') as prog:
for s in samples:
self.data[s].filter_correlation(x_analyte, y_analyte,
window=window,
r_threshold=r_threshold,
p_threshold=p_threshold,
filt=filt)
prog.update()
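# Hedged usage sketch (analytes and thresholds are illustrative):
#   dat.filter_correlation(x_analyte='Al27', y_analyte='Mn55',
#                          window=51, r_threshold=0.9, p_threshold=0.05)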
@_log
def correlation_plots(self, x_analyte, y_analyte, window=15, filt=True, recalc=False, samples=None, subset=None, outdir=None):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
if outdir is None:
outdir = self.report_dir + '/correlations/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
x_analyte = self._analyte_checker(x_analyte, single=True)
y_analyte = self._analyte_checker(y_analyte, single=True)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, _ = self.data[s].correlation_plot(x_analyte=x_analyte, y_analyte=y_analyte,
window=window, filt=filt, recalc=recalc)
f.savefig('{}/{}_{}-{}.pdf'.format(outdir, s, x_analyte, y_analyte))
plt.close(f)
prog.update()
return
@_log
def filter_on(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters on for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte)
for s in samples:
try:
self.data[s].filt.on(analyte, filt)
except:
warnings.warn("filt.on failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
@_log
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are deactivated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte)
for s in samples:
try:
self.data[s].filt.off(analyte, filt)
except:
warnings.warn("filt.off failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
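# Hedged usage sketch (the filter-name fragments and analyte are illustrative;
# names are matched partially against existing filters):
#   dat.filter_on(filt='thresh_below')
#   dat.filter_off(filt='thresh_above', analyte='Sr88')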
def filter_status(self, sample=None, subset=None, stds=False):
"""
Prints the current status of filters for specified samples.
Parameters
----------
sample : str
Which sample to print.
subset : str
Specify a subset
stds : bool
Whether or not to include standards.
"""
if sample is None and subset is None:
if not self._has_subsets:
return self.data[self.subsets['All_Samples'][0]].filt.filter_table
else:
fdfs = {}
for n in sorted(str(sn) for sn in self._subset_names):
if n in self.subsets:
pass
elif int(n) in self.subsets:
n = int(n)
pass
subset_name = str(n)
fdfs[subset_name] = self.data[self.subsets[n][0]].filt.filter_table
if len(self.subsets['not_in_set']) > 0:
fdfs['Not in Subset'] = self.data[self.subsets['not_in_set'][0]].filt.filter_table
return pd.concat(fdfs, names=['subset'])
elif sample is not None:
fdfs = {}
fdfs[sample] = self.data[sample].filt.filter_table
return pd.concat(fdfs, names=['sample'])
elif subset is not None:
if isinstance(subset, (str, int, float)):
subset = [subset]
fdfs = {}
for n in subset:
subset_name = str(n)
fdfs[subset_name] = self.data[self.subsets[n][0]].filt.filter_table
return | pd.concat(fdfs, names=['subset']) | pandas.concat |
import quandl
import pandas as pd
import numpy as np
def get_data(daysahead=20):
# import data
# USDCAD= quandl.get("FED/RXI_N_B_CA", authtoken="mmpvRYssGGBNky8<PASSWORD>")
# US overnight rates
EffFedRate = quandl.get("FED/RIFSPFF_N_D", authtoken="<PASSWORD>", start_date='1980-01-01')
FedUppTarRange= quandl.get("FRED/DFEDTARU", authtoken="<PASSWORD>")
FedLowTarRange= quandl.get("FRED/DFEDTARL", authtoken="<PASSWORD>")
FedHisTarRate=quandl.get("FRED/DFEDTAR", authtoken="<PASSWORD>", start_date='1980-01-01')
#US yield curve rates since 1990
USyields = quandl.get("USTREASURY/YIELD", authtoken="<PASSWORD>")
#net cad long/short spec and non-speculative positions
NetCAD=quandl.get("CFTC/090741_F_L_ALL")
#oil prices futures weekly - Calculate backwardation/contango
Oil4 = quandl.get("EIA/PET_RCLC4_W", authtoken="mmpvRYssG<PASSWORD>")
Oil4.columns=['Oil4']
Oil1 = quandl.get("EIA/PET_RCLC1_W", authtoken="mmpvRYssGGBN<PASSWORD>")
Oil1.columns=['Oil1']
#oil spot
Oilspot = quandl.get("FRED/DCOILWTICO", authtoken="<PASSWORD>")
Oilspot.columns=['Oilspot']
# Rig count
RigsUS = quandl.get("BKRHUGHES/COUNT_BY_TRAJECTORY", authtoken="<PASSWORD>")
RigsUS['RigsDelta']=RigsUS['Total']-RigsUS['Total'].shift()
RigsUS=RigsUS[['Total','RigsDelta']]
#US oil inventories
OilInv = quandl.get("EIA/WCESTUS1", authtoken="<PASSWORD>")
OilInv.columns=['Inv']
OilInv['InvDelta']=OilInv['Inv']-OilInv['Inv'].shift()
#USCPI
CPI = quandl.get("YALE/SP_CPI", authtoken="<PASSWORD>", start_date="1979-12-30")
CPI.columns=['CPI']
#Cad Bonds
CADBOC= pd.read_csv('C1.csv',skiprows=4, index_col=['Rates'],skipfooter=7, engine='python').apply(pd.to_numeric, errors='coerce')# CANSIM table 176-0043 CanBonds (coerce all columns to numeric)
#BoC overnight rates
BOCON= pd.read_csv('C2.csv',skiprows=2, index_col=['Daily'])#CANSIM table 176-0048
BOCON.columns=['BOC fundrate']
BOCON.dropna(inplace=True)
# Employment numbers
USUnEm=quandl.get("FRED/UNRATE", authtoken="<PASSWORD>",start_date='1955-06-01')
USUnEm.columns=['Unemployment rate US']
USNonFarm=quandl.get("BLSE/CES0000000001", authtoken="<PASSWORD>",start_date='1955-06-01')
USNonFarm.columns=['1000s employed US']
employmentsituationdate=pd.DataFrame(pd.read_excel('EmploySitUS.xlsx',skiprows=35).iloc[:,0])
employmentsituationdate.columns=['date']
rest=pd.merge(USUnEm, employmentsituationdate,left_index=True, right_on='date',how='outer')
rest=rest.set_index('date')
rest=rest.sort_index(level=0)
rest=pd.merge(rest, USNonFarm,left_index=True, right_index=True,how='outer')
rest['Uunemploy']=rest['Unemployment rate US'].shift(2)
rest.fillna(method='pad',inplace=True)
rest.tail(10)
emp=pd.merge(employmentsituationdate,rest,left_on='date',right_index=True)
emp.drop(['Unemployment rate US'],axis=1, inplace=True)
emp=emp.set_index('date')
CanEm=pd.read_csv('C3.csv',skiprows=3, index_col=['Data type']) #Cansim table 282-0087
CanEm=CanEm.iloc[0:3,5:].T['Seasonally adjusted']
CanEm.columns=[['1000s employed Can','Unemployment rate Can']]
CanEm1=CanEm.shift()
CanEm1.columns=[['C1000s employed shift1','CUnemploy rate shift1']]
CanEm2=CanEm.shift(2)
CanEm2.columns=[['C1000s employed shift2','CUnemploy rate shift2']]
CanEmS=pd.merge(CanEm1,CanEm2, left_index=True,right_index=True)
CanEmS['gainrateemp']=(CanEmS['C1000s employed shift1'].values -CanEmS['C1000s employed shift2'].values)/CanEmS['C1000s employed shift2']*100
CanEmS['gainrateunem']=CanEmS['CUnemploy rate shift1'].values-CanEmS['CUnemploy rate shift2'].values
CanEmS.index=pd.to_datetime(CanEmS.index)
CanDate=list(CanEmS.index)
for i in range(len(CanDate)):
if CanDate[i].weekday()==4:
pass
elif CanDate[i].weekday()<4:
CanDate[i]=CanDate[i]+pd.Timedelta(str(4-CanDate[i].weekday())+' days')
else:
CanDate[i]=CanDate[i]+pd.Timedelta(str(4-CanDate[i].weekday()+7)+' days')
CanEmS.index=pd.DatetimeIndex(CanDate)
CanEmSF=CanEmS[['CUnemploy rate shift1','CUnemploy rate shift2','gainrateemp','gainrateunem']]
# CanEmS.index.values=CanEmS.index.values.apply(lambda x: x.replace(day=1) )
#Add
# Currt= USDCAD
# Currt['Plus1']=Currt['Value'].shift(periods=-daysahead) # THIS IS THE 20 DAY AHEAD PRICE
# Currt['Minus1']=(Currt['Value'].shift(periods=1)-Currt['Value'])*100
# # Utest['Minus5']=(Currt['Value'].shift(periods=5)-Currt['Value'])*100
# Currt['Minus5']=(Currt['Value'].rolling(5).mean()-Currt['Value'])*100
# Currt['Minus30']=(Currt['Value'].rolling(30).mean()-Currt['Value'])*100
# Currt['Minus100']=(Currt['Value'].rolling(100).mean()-Currt['Value'])*100
# Currt['Gain']=Currt['Plus1']-Currt['Value']
# Currt['Result']=np.where(Currt['Gain']>0,1,0)
# Currt=Currt[['Value','Minus1','Minus5','Minus30', 'Minus100','Result','Gain']]
# Currt.columns=[['Current','Minus1','Minus5','Minus30', 'Minus100','Result','Gain']]
# Curr=Currt.iloc[100:]
Oil=pd.merge(Oilspot, Oil1, how='outer', left_index=True, right_index=True,suffixes=('_spot','_C1'))
Oil= | pd.merge(Oil, Oil4, how='outer', left_index=True, right_index=True) | pandas.merge |
#!/usr/bin/env python3
import argparse
import csv
import gzip
import io
import json
import os
from os import walk
import shutil
import sys
import tempfile
from datetime import datetime
import pandas as pd
import pyarrow as pa
import itertools
from io import StringIO
from sys import getsizeof
import pickle
import singer
from jsonschema import Draft4Validator, FormatChecker
from target_s3 import s3
from target_s3 import utils
logger = singer.get_logger()
def write_temp_pickle(data={}):
temp_unique_pkl = 'temp_unique.pickle'
dir_temp_file = os.path.join(tempfile.gettempdir(), temp_unique_pkl)
with open(dir_temp_file, 'wb') as handle:
pickle.dump(data, handle)
def read_temp_pickle():
data = {}
temp_unique_pkl = 'temp_unique.pickle'
dir_temp_file = os.path.join(tempfile.gettempdir(), temp_unique_pkl)
if os.path.isfile(dir_temp_file):
with open(dir_temp_file, 'rb') as handle:
data = pickle.load(handle)
return data
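# Hedged usage sketch of the temp-pickle round trip (keys/values are illustrative):
#   write_temp_pickle({'stream_a': {'id-1', 'id-2'}})
#   seen = read_temp_pickle()   # -> {'stream_a': {'id-1', 'id-2'}} on the same machine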
# Upload created files to S3
def upload_to_s3(s3_client, s3_bucket, filename, stream, field_to_partition_by_time,
record_unique_field, compression=None, encryption_type=None, encryption_key=None):
data = None
df = None
final_files_dir = ''
with open(filename, 'r') as f:
data = f.read().splitlines()
df = | pd.DataFrame(data) | pandas.DataFrame |
from typing import Optional
import pandas as pd
import numpy as np
from .common.helpers.helpers import Frame
from .settings import default_ticker, PeriodLength, _MONTHS_PER_YEAR
from .api.data_queries import QueryData
from .api.namespaces import get_assets_namespaces
class Asset:
"""
A financial asset, that could be used in a list of assets or in portfolio.
Parameters
----------
symbol: str, default "SPY.US"
Symbol is an asset ticker with namespace after dot. The default value is "SPY.US" (SPDR S&P 500 ETF Trust).
"""
def __init__(self, symbol: str = default_ticker):
if symbol is None or len(str(symbol).strip()) == 0:
raise ValueError("Symbol can not be empty")
self._symbol = str(symbol).strip()
self._check_namespace()
self._get_symbol_data(symbol)
self.ror: pd.Series = QueryData.get_ror(symbol)
self.first_date: pd.Timestamp = self.ror.index[0].to_timestamp()
self.last_date: pd.Timestamp = self.ror.index[-1].to_timestamp()
self.period_length: float = round(
(self.last_date - self.first_date) / np.timedelta64(365, "D"), ndigits=1
)
self.pl = PeriodLength(
self.ror.shape[0] // _MONTHS_PER_YEAR,
self.ror.shape[0] % _MONTHS_PER_YEAR,
)
def __repr__(self):
dic = {
"symbol": self.symbol,
"name": self.name,
"country": self.country,
"exchange": self.exchange,
"currency": self.currency,
"type": self.type,
"isin": self.isin,
"first date": self.first_date.strftime("%Y-%m"),
"last date": self.last_date.strftime("%Y-%m"),
"period length": "{:.2f}".format(self.period_length),
}
return repr(pd.Series(dic))
def _check_namespace(self):
namespace = self._symbol.split(".", 1)[-1]
allowed_namespaces = get_assets_namespaces()
if namespace not in allowed_namespaces:
raise ValueError(
f"{namespace} is not in allowed assets namespaces: {allowed_namespaces}"
)
def _get_symbol_data(self, symbol) -> None:
x = QueryData.get_symbol_info(symbol)
self.ticker: str = x["code"]
self.name: str = x["name"]
self.country: str = x["country"]
self.exchange: str = x["exchange"]
self.currency: str = x["currency"]
self.type: str = x["type"]
self.isin: str = x["isin"]
self.inflation: str = f"{self.currency}.INFL"
@property
def symbol(self) -> str:
"""
Return a symbol of the asset.
Returns
-------
str
"""
return self._symbol
@property
def price(self) -> Optional[float]:
"""
Return live price of an asset.
Live price is delayed (15-20 minutes).
For certain namespaces (FX, INDX, PIF etc.) live price is not supported.
Returns
-------
float, None
Live price of the asset. Returns None if not defined.
"""
return QueryData.get_live_price(self.symbol)
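# Hedged usage sketch (requires network access to the data provider; the value
# shown is illustrative):
#   >>> ok.Asset('SPY.US').price
#   437.12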
@property
def close_daily(self):
"""
Return close price time series historical daily data.
Returns
-------
Series
Time series of close price historical data (daily).
"""
return QueryData.get_close(self.symbol, period='D')
@property
def close_monthly(self):
"""
Return close price time series historical monthly data.
Monthly close time series not adjusted to for corporate actions: dividends and splits.
Returns
-------
Series
Time series of close price historical data (monthly).
Examples
--------
>>> import matplotlib.pyplot as plt
>>> x = ok.Asset('VOO.US')
>>> x.close_monthly.plot()
>>> plt.show()
"""
return Frame.change_period_to_month(self.close_daily)
@property
def adj_close(self):
"""
Return adjusted close price time series historical daily data.
The adjusted closing price amends a stock's closing price after accounting
for corporate actions: dividends and splits. All values are adjusted by reducing the price
prior to the dividend payment (or split).
Returns
-------
Series
Time series of adjusted close price historical data (daily).
"""
return QueryData.get_adj_close(self.symbol, period='D')
@property
def dividends(self) -> pd.Series:
"""
Return dividends time series historical monthly data.
Returns
-------
Series
Time series of dividends historical data (monthly).
Examples
--------
>>> x = ok.Asset('VNQ.US')
>>> x.dividends
Date
2004-12-22 1.2700
2005-03-24 0.6140
2005-06-27 0.6440
2005-09-26 0.6760
...
2020-06-25 0.7590
2020-09-25 0.5900
2020-12-24 1.3380
2021-03-25 0.5264
Freq: D, Name: VNQ.US, Length: 66, dtype: float64
"""
div = QueryData.get_dividends(self.symbol)
if div.empty:
# Zero time series for assets where dividend yield is not defined.
index = pd.date_range(
start=self.first_date, end=self.last_date, freq="MS", closed=None
)
period = index.to_period("D")
div = | pd.Series(data=0, index=period) | pandas.Series |
# -*- coding: utf-8 -*-
"""
File: M2KCustomPlotFFT.py
This custom script uses ADALM2000 exported CSV data to plot FFT results.
v 1.01, August 13, 2019
First release
v 1.10, October 12, 2019
Add peak display feature, along with two control parameters: peak_cnt and min_peak_height
- update constructor
- update plot_custom_script() method
Add intelligent display of frequency range for x-axis label in plot_custom_script()
Add intelligent display of voltage range for y-axis label and legend in plot_custom_script()
Eliminate labque titles from constructor, hard code them instead as:
self.plot_title_1
self.plot_title_2
Eliminate self.headings_to_use, replace with just y_heading
Algorithm:
Create the FFT from a given CSV data channel
Prepare a plot of the results
Include option to display largest peak information within plot (sorted by peak magnitude)
Author: <NAME>, <EMAIL>
License: MIT
Copyright 2019 <NAME>, Bioph<EMAIL>
See included license file (license.txt) or read it online: https://opensource.org/licenses/MIT
# --------------------------------------------------------------------------------------------
Main program refers to: M2KScopePlot.py
This script is completely contained within a class: CustomPlotScript(object):
Attributes:
values defined using __init__ constructor to customize the FFT plot
self.y_heading
self.n
self.peak_cnt
self.min_peak_height
flag used to control whether custom script should be used or not
self.is_custom_script
data collected from CSV data file during execution of main program
self.iplot_colors
self.df_data
self.info_dict
self.headings
Additional data copied from main program
self.iplot_colors
Additional strings used for FFT display in final lab report
self.plot_title_1
self.plot_title_2
Methods:
get_headings_to_use(self)
get_is_custom_script(self)
set_iplot_colors(self, iplot_colors)
set_df_data(self, df_data)
set_info_dict(self, info_dict)
set_headings(self, headings)
test_custom_plot(self)
reduce_sample_array(signal_data, n)
plot_custom_script(self)
Initiate the FFT plot script by creating an object within the main program (example):
custom_script = CustomPlotScript('CH1(V)', 10, 4, 0.0)
Where:
'CH1(V)' is the y data heading from csv file to use for fft and plot
10 reduces the data set by 10x to limit the x-axis frequency range plotted
4 requests up to 4 peaks to feature within the plot/legend and final lab report
0.0 requests all peaks found without any voltage limit (y-axis)
Note:
The peaks found are reported (labque and plot legend) with highest voltage first
"""
from scipy.fftpack import *
from scipy import signal
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from M2KScope.M2KScopePlotFncs import *
class CustomPlotScript(object):
"""
A class used to add a custom plot to the end of the subplots created in the main program
"""
def __init__(self, y_heading, n=1, peak_cnt=0, min_peak_height=0.0):
"""
:param y_heading: str
y-axis heading as found in csv data file
:param n: positive integer
Number of times to reduce FFT sampling frequency
Larger n generates narrower frequency range for plotted fft
When n=1 no sampling data will be removed
:param peak_cnt: integer
Maximum number of peaks to highlight (shown as vertical lines plus frequency, magnitude list in legend)
Peaks are found using the scipy.signal.find_peak() function
Number of peaks displayed will be the maximum of peaks found up to the number of peaks requested
Note: -1 indicates all peaks should be displayed that meet min_peak_height limitation
:param min_peak_height: float
Use this parameter to skip peaks that are too small to be of interest
The value corresponds to the FFT magnitude (in Volts). A value of .05 (Volts) for example
"""
self.y_heading = y_heading.strip()
self.n = int(n)
self.peak_cnt = int(peak_cnt)
self.min_peak_height = float(min_peak_height)
# Titles packaged for labque display using headings_to_use() method
self.plot_title_1 = "Custom FFT Plot using: "
self.plot_title_2 = ", with dataset reduced by a factor of: "
# Flag used to control custom script in main program
self.is_custom_script = False
# CSV data useful for custom scripting
# ------------------------------------
# List of colors to use for plotting
# example: iplot_colors = ['orange', 'magenta', 'green', 'blue', 'cyan', 'red', 'yellow', 'purple']
self.iplot_colors = []
# Pandas dataframe to hold imported csv column data, one row for each heading
#
# Example using print(df_data)
#
# Sample Time(S) CH1(V) CH2(V) M1(V)
# 0 0 -0.004000 -0.003607 -0.009240 0.004883
# 1 1 -0.003999 0.001272 -0.004327 0.004883
# 2 2 -0.003998 0.004524 0.000585 0.003906
# 3 3 -0.003997 0.011029 0.007136 0.003906
# ... ... ... ... ... ...
# 7997 7997 0.003997 -0.023123 -0.028891 0.004883
# 7998 7998 0.003998 -0.021497 -0.022341 0.000000
# 7999 7999 0.003999 -0.010112 -0.014153 0.003906
self.df_data = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 14 13:52:36 2020
@author: diego
"""
import os
import sqlite3
import numpy as np
import pandas as pd
import plots as _plots
import update_prices
import update_companies_info
pd.set_option("display.width", 400)
pd.set_option("display.max_columns", 10)
pd.options.mode.chained_assignment = None
update_prices.update_prices()
update_companies_info.update_db()
cwd = os.getcwd()
conn = sqlite3.connect(os.path.join(cwd, "data", "finance.db"))
cur = conn.cursor()
# %% Functions
class Ticker:
"""
Attributes and Methods to analyse stocks traded in B3 -BOLSA BRASIL BALCÃO
"""
def __init__(self, ticker, group="consolidated"):
"""
Creates a Ticker Class Object
Args:
ticker: string
string of the ticker
group: string
Financial statements group. Can be 'consolidated' or 'individual'
"""
self.ticker = ticker.upper()
df = pd.read_sql(
f"""SELECT cnpj, type, sector, subsector, segment, denom_comerc
FROM tickers
WHERE ticker = '{self.ticker}'""",
conn,
)
if len(df) == 0:
print('unknown ticker')
return
self.cnpj = df["cnpj"][0]
self.type = df["type"][0]
self.sector = df["sector"][0]
self.subsector = df["subsector"][0]
self.segment = df["segment"][0]
self.denom_comerc = df["denom_comerc"][0]
Ticker.set_group(self, group)
on_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'ON'",
conn,
)
on_ticker = on_ticker[on_ticker["ticker"].str[-1] == "3"]
self.on_ticker = on_ticker.values[0][0]
try:
self.pn_ticker = pd.read_sql(
f"SELECT ticker FROM tickers WHERE cnpj = '{self.cnpj}' AND type = 'PN'",
conn,
).values[0][0]
except:
pass
def set_group(self, new_group):
"""
To change the financial statement group attribute of a object
Args:
new_group: string
can be 'consolidated' or 'individual'
"""
if new_group in ["individual", "consolidado", "consolidated"]:
if new_group == "individual":
self.grupo = "Individual"
else:
self.grupo = "Consolidado"
# Infer the frequency of the reports
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
if len(dates) == 0:
self.grupo = "Individual"
print(
f"The group of {self.ticker} was automatically switched to individual due to the lack of consolidated statements."
)
dates = pd.read_sql(
f"""SELECT DISTINCT dt_fim_exerc as date
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
ORDER BY dt_fim_exerc""",
conn,
)
try:
freq = pd.infer_freq(dates["date"])
self.freq = freq[0]
except ValueError:
self.freq = "Q"
except TypeError:
dates["date"] = pd.to_datetime(dates["date"])
number_of_observations = len(dates)
period_of_time = (
dates.iloc[-1, 0] - dates.iloc[0, 0]
) / np.timedelta64(1, "Y")
if number_of_observations / period_of_time > 1:
self.freq = "Q"
else:
self.freq = "A"
if self.freq == "A":
print(
f"""
The {self.grupo} statements of {self.ticker} are only available on an annual basis.
Only YTD values will be available in the functions and many functions will not work.
Try setting the financial statements to individual:
Ticker.set_group(Ticker object, 'individual')
"""
)
else:
print("new_group needs to be 'consolidated' or 'individual'.")
def get_begin_period(self, function, start_period):
"""
Support method for other methods of the Class
"""
if start_period == "all":
begin_period = pd.to_datetime("1900-01-01")
return begin_period.date()
elif start_period not in ["all", "last"]:
try:
pd.to_datetime(start_period)
except:
print(
"start_period must be 'last', 'all', or date formated as 'YYYY-MM-DD'."
)
return
if start_period == "last":
if function in ["prices", "total_shares", "market_value"]:
last_date = pd.read_sql(
f"SELECT date FROM prices WHERE ticker = '{self.ticker}' ORDER BY date DESC LIMIT(1)",
conn,
)
else:
last_date = pd.read_sql(
f"SELECT dt_fim_exerc FROM dre WHERE cnpj = '{self.cnpj}' AND grupo_dfp = '{self.grupo}' ORDER BY dt_fim_exerc DESC LIMIT(1)",
conn,
)
begin_period = pd.to_datetime(last_date.values[0][0])
else:
begin_period = pd.to_datetime(start_period)
return begin_period.date()
def create_pivot_table(df):
"""
Support method for other methods of the Class
"""
##### Creates a pivot table and add % change columns #####
# create columns with % change of the values
# value_types: ytd, quarter_value, ttm_value
first_type = df.columns.get_loc('ds_conta') + 1
value_types = list(df.columns[first_type:])
new_columns = [i + " % change" for i in value_types]
df[new_columns] = df[value_types].div(
df.groupby("cd_conta")[value_types].shift(1))
# the calculation of %change from ytd is different:
if 'ytd' in value_types:
shifted_values = df[['dt_fim_exerc', 'cd_conta', 'ytd']]
shifted_values = shifted_values.set_index(
[(pd.to_datetime(shifted_values['dt_fim_exerc']) + pd.DateOffset(years=1)), shifted_values['cd_conta']])
df = df.set_index([df['dt_fim_exerc'], df['cd_conta']])
df['ytd % change'] = df['ytd'] / shifted_values['ytd']
df[new_columns] = (df[new_columns] - 1) * 100
# reshape
df = df.pivot(
index=["cd_conta", "ds_conta"],
columns=["dt_fim_exerc"],
values=value_types + new_columns
)
# rename multiIndex column levels
df.columns = df.columns.rename("value", level=0)
df.columns = df.columns.rename("date", level=1)
# sort columns by date
df = df.sort_values([("date"), ("value")], axis=1, ascending=False)
# Sometimes, the descriptions of the accounts have small differences for the
# same account in different periods, such as punctuation. The purpose of the df_index
# is to keep only one description for each account, avoiding duplicated rows.
df_index = df.reset_index().iloc[:, 0:2]
df_index.columns = df_index.columns.droplevel(1)
df_index = df_index.groupby("cd_conta").first()
# This groupby adds the duplicated rows
df = df.groupby(level=0, axis=0).sum()
# The next two lines add the account description to the dataframe multiIndex
df["ds_conta"] = df_index["ds_conta"]
df = df.set_index("ds_conta", append=True)
# Reorder the multiIndex column levels
df = df.reorder_levels(order=[1, 0], axis=1)
# Due to the command line 'df = df.sort_values([('dt_fim_exerc'), ('value')],
# axis=1, ascending=False)'
# the columns are ordered by date descending, and value descending. The purpose
# here is to set the order as: date descending and value ascending
df_columns = df.columns.to_native_types()
new_order = []
for i in range(1, len(df_columns), 2):
new_order.append(df_columns[i])
new_order.append(df_columns[i - 1])
new_order = pd.MultiIndex.from_tuples(
new_order, names=("date", "value"))
df = df[new_order]
return df
def income_statement(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the income statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="income_statement", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dre
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm == True:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if quarter == False:
df = df.drop(["quarter_value"], axis=1)
if ytd == False:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = pd.to_datetime(df["dt_fim_exerc"])
df = df[df["dt_fim_exerc"] >= begin_period + pd.DateOffset(months=12)]
df = df.drop(columns=["fiscal_quarter"])
df = Ticker.create_pivot_table(df)
return df
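# Hedged usage sketch (the ticker and start date are illustrative):
#   petr = Ticker('PETR4')
#   dre = petr.income_statement(quarter=True, ytd=True, ttm=True,
#                               start_period='2018-01-01')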
def balance_sheet(self, start_period="all", plot=False):
"""
Creates a dataframe with the balance sheet statement of the object.
Args:
start_period: string
plot: boolean
Returns: pandas dataframe
"""
begin_period = Ticker.get_begin_period(
self, function="bp", start_period=start_period
)
query = f"""SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpa
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
UNION ALL
SELECT dt_fim_exerc, cd_conta, ds_conta, vl_conta
FROM bpp
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn, parse_dates=['dt_fim_exerc'])
df = Ticker.create_pivot_table(df)
if plot:
_plots.bs_plot(df, self.ticker, self.grupo)
return df
def cash_flow(self, quarter=True, ytd=True, ttm=True, start_period="all"):
"""
Creates a dataframe with the cash flow statement of the object.
Args:
quarter: boolean
includes or not quarter values
ytd: boolean
includes or not year to date values
ttm: boolean
includes or not trailing twelve months value
start_period: string
Returns: pandas dataframe
"""
if self.freq == "A":
quarter = False
ttm = False
begin_period = Ticker.get_begin_period(
self, function="dfc", start_period=start_period
)
begin_period = begin_period + pd.DateOffset(months=-12)
query = f"""SELECT dt_fim_exerc, fiscal_quarter, cd_conta, ds_conta, vl_conta AS ytd
FROM dfc
WHERE cnpj = '{self.cnpj}'
AND grupo_dfp = '{self.grupo}'
AND dt_fim_exerc >= '{begin_period.date()}'
ORDER BY dt_fim_exerc"""
df = pd.read_sql(query, conn)
df["quarter_value"] = df[["cd_conta", "ytd"]
].groupby("cd_conta").diff()
df["quarter_value"][df["fiscal_quarter"] == 1] = df["ytd"][
df["fiscal_quarter"] == 1
]
if ttm:
df["ttm_value"] = (
df[["dt_fim_exerc", "cd_conta", "quarter_value"]]
.groupby("cd_conta")
.rolling(window=4, min_periods=4)
.sum()
.reset_index(0, drop=True)
)
if not quarter:
df = df.drop(["quarter_value"], axis=1)
if not ytd:
df = df.drop(["ytd"], axis=1)
df["dt_fim_exerc"] = | pd.to_datetime(df["dt_fim_exerc"]) | pandas.to_datetime |
import pandas as pd
import numpy as np
import itertools,os
import networkx as nx
from itertools import permutations
import seaborn as sns
# from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import pairwise_distances
from sklearn.cluster import AgglomerativeClustering
from ast import literal_eval
def reduced_QPspace(proto_name,flag=0):
df=pd.read_csv(proto_name+'/all_QPS.csv')
df.sort_values(by=['amp_fac'], ascending=False,inplace=True)
print(df)
if(flag==1):
df = df.query('max_amp >=10')
app='_max'
else:
df = df.query('amp_fac >=10')
app='_median'
print("PRINTING: ",df)
if 'host' in df.columns.values:
# df = df.applymap(str)
del df['host']
# del df['url']
newdf=df.copy()
del newdf['amp_fac']
del newdf['min_amp']
del newdf['max_amp']
del newdf['count_amp']
amps=df['amp_fac'].values
print("amps",amps.shape)
newdf = newdf.applymap(str)
arr =newdf.values
print(arr.shape,arr)
uniques = np.unique(arr)
X = np.searchsorted(uniques, arr)
avg_dist = pairwise_distances(X, X, metric='hamming')
print(avg_dist.shape)
print(avg_dist[0,:])
print(arr[0,:])
print(arr[1,:])
merge_thr = 1 / arr.shape[1]
print("merge thresh",merge_thr)
# n_clusters=10
# model = AgglomerativeClustering(n_clusters=n_clusters,linkage="average", affinity='hamming')
# model.fit(arr)
# print(model)
neigh_AFthresh= 10000
new_signatures=[]
ign_list=[]
for i in range(avg_dist.shape[0]):
if i in ign_list:
continue
currow= avg_dist[i,:]
my_AF=amps[i]
p1=np.where(currow<=merge_thr)[0]
neigh_AF=amps[p1]
possible_inds=np.where(np.abs(neigh_AF-my_AF)<=neigh_AFthresh)[0]
valid_inds = p1[possible_inds]
valid_inds =np.setdiff1d(valid_inds,ign_list)
ign_list.extend(list(valid_inds))
valid_AFS=neigh_AF[possible_inds]
rows= arr[valid_inds,:]
newrow=[]
for k in range(rows.shape[1]):
curvals=list(np.unique(rows[:,k]))
newrow.append(curvals)
newrow.append(np.mean(valid_AFS))
new_signatures.append(newrow)
print(len(new_signatures),new_signatures[0])
cols=df.columns.values[0:-3]
print(cols)
new_sigs=np.array(new_signatures)
new_sigDF= | pd.DataFrame(new_sigs,columns=cols) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 17:16:38 2020
@author: <NAME> @ UCL Energy Institute
===================================================================
Utils for text data analysis and visualisation
===================================================================
"""
try:
import _pickle as pickle
except:
import pickle
import pandas as pd
import numpy as np
import nltk
nltk.download('words')
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
from bs4 import BeautifulSoup
import re, random
import preprocessor as p
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from itertools import islice
###############################################################################
# Get info about data filenames, headers to drop and rename
def getDataInfo(dataset):
if dataset == 'socialmedia_disaster':
filenames = ['socialmedia-disaster-tweets-DFE.csv']
headerDropper = ['_golden', '_unit_id', '_unit_state','_trusted_judgments','_last_judgment_at','choose_one:confidence','choose_one_gold','keyword','location','tweetid','userid']
renamedColumns = {'choose_one': 'labels', 'text': 'message'}
elif dataset == 'multilingual_disaster':
filenames = ['disaster_response_messages_training.csv',
'disaster_response_messages_validation.csv',
'disaster_response_messages_test.csv',]
headerDropper = ['id', 'split', 'original','genre','PII','request','offer','aid_related',
'medical_help','medical_products','search_and_rescue','security','military',
'child_alone','water','food','shelter','clothing','money','missing_people',
'refugees','death','other_aid','infrastructure_related','transport',
'buildings','electricity','tools','hospitals','shops','aid_centers',
'other_infrastructure','weather_related','storm','fire',
'earthquake','cold','other_weather','direct_report','weather_related',
'floods']
renamedColumns = {'related': 'label'}
elif dataset == "UnifiedMETCEHFloodsUK":
filenames = ['Stevens2016_Met_CEH_Dataset_Public_Version.csv']
headerDropper = ['CID','ID','Year','Month','Day','Place(s) Affected','Source',
'Flood_Type','Flood','Dataset','England','Anglian','EA_Wales',
'Midlands','North-East','North-West','NI','Scotland',
'South-East','South-West','Unspecified']
renamedColumns = {'Description': 'message'}
return filenames, headerDropper, renamedColumns
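# Hedged usage sketch (the path, ratio and target label are illustrative):
#   files, drop_cols, renames = getDataInfo('socialmedia_disaster')
#   train, val, test = prepareData('data/', 'socialmedia_disaster',
#                                  pos_ratio=0.5, targets=['Relevant'])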
###############################################################################
# Prepare data and splits for the given dataset
def prepareData(path, dataset, pos_ratio, targets):
filenames, headerDropper, renamedColumns = getDataInfo(dataset)
data = pd.DataFrame()
trainData, valData, testData = list(), list(), list()
for filename in filenames:
print('Reading file '+filename+' ...')
if dataset in ['UnifiedMETCEHFloodsUK']:
tweets = pd.read_csv(path+filename, header=1, encoding = 'latin-1')
else:
tweets = | pd.read_csv(path+filename, encoding = 'latin-1') | pandas.read_csv |
""" Test cases for DataFrame.plot """
import string
import warnings
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
from pandas.io.formats.printing import pprint_thing
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlotsSubplots(TestPlotBase):
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
for kind in ["bar", "barh", "line", "area"]:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3,)
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax, labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
if kind != "bar":
# change https://github.com/pandas-dev/pandas/issues/26714
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
def test_subplots_timeseries(self):
idx = date_range(start="2014-07-01", freq="M", periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ["line", "area"]:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {
"numeric": np.array([1, 2, 5]),
"timedelta": [
pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h"),
],
"datetime_no_tz": [
pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
"datetime_all_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00", utc=True),
pd.to_datetime("2017-08-02 00:00:00", utc=True),
],
"text": ["This", "should", "fail"],
}
testdata = DataFrame(data)
y_cols = ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"]
for col in y_cols:
ax = testdata.plot(y=col)
result = ax.get_lines()[0].get_data()[1]
expected = testdata[col].values
assert (result == expected).all()
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
testdata.plot(y="text")
@pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
period:
since period isn't yet implemented in ``select_dtypes``
and because it will need a custom value converter +
tick formatter (as was done for x-axis plots)
categorical:
because it will need a custom value converter +
tick formatter (also doesn't work for x-axis, as of now)
datetime_mixed_tz:
because of the way how pandas handles ``Series`` of
``datetime`` objects with different timezone,
generally converting ``datetime`` objects in a tz-aware
form could help with this problem
"""
data = {
"numeric": np.array([1, 2, 5]),
"period": [
pd.Period("2017-08-01 00:00:00", freq="H"),
pd.Period("2017-08-01 02:00", freq="H"),
pd.Period("2017-08-02 00:00:00", freq="H"),
],
"categorical": pd.Categorical(
["c", "b", "a"], categories=["a", "b", "c"], ordered=False
),
"datetime_mixed_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
}
testdata = DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (
ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (
ax_categorical.get_lines()[0].get_data()[1]
== testdata["categorical"].values
).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
assert (
ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
== testdata["datetime_mixed_tz"].values
).all()
def test_subplots_layout_multi_column(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
msg = "Layout of 1x1 must be larger than required size 3"
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, layout=(1, 1))
msg = "At least one dimension of layout must be positive"
with pytest.raises(ValueError, match=msg):
df.plot(subplots=True, layout=(-1, -1))
@pytest.mark.parametrize(
"kwargs, expected_axes_num, expected_layout, expected_shape",
[
({}, 1, (1, 1), (1,)),
({"layout": (3, 3)}, 1, (3, 3), (3, 3)),
],
)
def test_subplots_layout_single_column(
self, kwargs, expected_axes_num, expected_layout, expected_shape
):
# GH 6667
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, **kwargs)
self._check_axes_shape(
axes,
axes_num=expected_axes_num,
layout=expected_layout,
)
assert axes.shape == expected_shape
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(
np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
)
df.plot(subplots=True, layout=(3, 2))
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3,)
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3,)
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
msg = "The number of passed axes must be 3, the same as the output plot"
with pytest.raises(ValueError, match=msg):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
        # an invalid layout should not affect the input and return value
        # (the warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
returned = df.plot(
subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
returned = df.plot(
subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
returned = df.plot(
subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4,)
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1,)
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start="2014-07-01", freq="M", periods=10),
)
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
axes = df[["A", "B"]].plot(subplots=True)
df["C"].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=["a"])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y="a")
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=["a"])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y="a")
self._check_legend_labels(ax, labels=["a (right)"] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([0.1, 1.0, 10.0, 100])
# no subplots
df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_bar_log_subplots(self):
expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True
)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
def test_boxplot_subplots_return_type(self, hist_df):
df = hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(
result, None, expected_keys=["height", "weight", "category"]
)
for t in ["dict", "axes", "both"]:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned,
t,
expected_keys=["height", "weight", "category"],
check_ax_title=False,
)
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(
np.random.randn(10, 2),
index=date_range("1/1/2000", periods=10),
columns=list("AB"),
)
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
from datetime import datetime, time, timedelta
from pandas.compat import range
import sys
import os
import nose
import numpy as np
from pandas import Index, DatetimeIndex, Timestamp, Series, date_range, period_range
import pandas.tseries.frequencies as frequencies
from pandas.tseries.tools import to_datetime
import pandas.tseries.offsets as offsets
from pandas.tseries.period import PeriodIndex
import pandas.compat as compat
from pandas.compat import is_platform_windows
import pandas.util.testing as tm
from pandas import Timedelta
def test_to_offset_multiple():
freqstr = '2h30min'
freqstr2 = '2h 30min'
result = frequencies.to_offset(freqstr)
assert(result == frequencies.to_offset(freqstr2))
expected = offsets.Minute(150)
assert(result == expected)
freqstr = '2h30min15s'
result = frequencies.to_offset(freqstr)
expected = offsets.Second(150 * 60 + 15)
assert(result == expected)
freqstr = '2h 60min'
result = frequencies.to_offset(freqstr)
expected = offsets.Hour(3)
assert(result == expected)
freqstr = '15l500u'
result = frequencies.to_offset(freqstr)
expected = offsets.Micro(15500)
assert(result == expected)
freqstr = '10s75L'
result = frequencies.to_offset(freqstr)
expected = offsets.Milli(10075)
assert(result == expected)
freqstr = '2800N'
result = frequencies.to_offset(freqstr)
expected = offsets.Nano(2800)
assert(result == expected)
# malformed
try:
frequencies.to_offset('2h20m')
except ValueError:
pass
else:
assert(False)
def test_to_offset_negative():
freqstr = '-1S'
result = frequencies.to_offset(freqstr)
assert(result.n == -1)
freqstr = '-5min10s'
result = frequencies.to_offset(freqstr)
assert(result.n == -310)
def test_to_offset_leading_zero():
freqstr = '00H 00T 01S'
result = frequencies.to_offset(freqstr)
assert(result.n == 1)
freqstr = '-00H 03T 14S'
result = frequencies.to_offset(freqstr)
assert(result.n == -194)
def test_to_offset_pd_timedelta():
# Tests for #9064
td = Timedelta(days=1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(86401)
assert(expected==result)
td = Timedelta(days=-1, seconds=1)
result = frequencies.to_offset(td)
expected = offsets.Second(-86399)
assert(expected==result)
td = Timedelta(hours=1, minutes=10)
result = frequencies.to_offset(td)
expected = offsets.Minute(70)
assert(expected==result)
td = Timedelta(hours=1, minutes=-10)
result = frequencies.to_offset(td)
expected = offsets.Minute(50)
assert(expected==result)
td = Timedelta(weeks=1)
result = frequencies.to_offset(td)
expected = offsets.Day(7)
assert(expected==result)
td1 = Timedelta(hours=1)
result1 = frequencies.to_offset(td1)
result2 = frequencies.to_offset('60min')
assert(result1 == result2)
td = Timedelta(microseconds=1)
result = frequencies.to_offset(td)
expected = offsets.Micro(1)
assert(expected == result)
td = Timedelta(microseconds=0)
tm.assertRaises(ValueError, lambda: frequencies.to_offset(td))
def test_anchored_shortcuts():
result = frequencies.to_offset('W')
expected = frequencies.to_offset('W-SUN')
assert(result == expected)
result1 = frequencies.to_offset('Q')
result2 = frequencies.to_offset('Q-DEC')
expected = offsets.QuarterEnd(startingMonth=12)
assert(result1 == expected)
assert(result2 == expected)
result1 = frequencies.to_offset('Q-MAY')
expected = offsets.QuarterEnd(startingMonth=5)
assert(result1 == expected)
def test_get_rule_month():
result = frequencies._get_rule_month('W')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Week())
assert(result == 'DEC')
result = frequencies._get_rule_month('D')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.Day())
assert(result == 'DEC')
result = frequencies._get_rule_month('Q')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=12))
    assert(result == 'DEC')
result = frequencies._get_rule_month('Q-JAN')
assert(result == 'JAN')
result = frequencies._get_rule_month(offsets.QuarterEnd(startingMonth=1))
assert(result == 'JAN')
result = frequencies._get_rule_month('A-DEC')
assert(result == 'DEC')
result = frequencies._get_rule_month(offsets.YearEnd())
assert(result == 'DEC')
result = frequencies._get_rule_month('A-MAY')
assert(result == 'MAY')
result = frequencies._get_rule_month(offsets.YearEnd(month=5))
assert(result == 'MAY')
class TestFrequencyCode(tm.TestCase):
def test_freq_code(self):
self.assertEqual(frequencies.get_freq('A'), 1000)
self.assertEqual(frequencies.get_freq('3A'), 1000)
self.assertEqual(frequencies.get_freq('-1A'), 1000)
self.assertEqual(frequencies.get_freq('W'), 4000)
self.assertEqual(frequencies.get_freq('W-MON'), 4001)
self.assertEqual(frequencies.get_freq('W-FRI'), 4005)
for freqstr, code in compat.iteritems(frequencies._period_code_map):
result = frequencies.get_freq(freqstr)
self.assertEqual(result, code)
result = frequencies.get_freq_group(freqstr)
self.assertEqual(result, code // 1000 * 1000)
result = frequencies.get_freq_group(code)
self.assertEqual(result, code // 1000 * 1000)
def test_freq_group(self):
self.assertEqual(frequencies.get_freq_group('A'), 1000)
self.assertEqual(frequencies.get_freq_group('3A'), 1000)
self.assertEqual(frequencies.get_freq_group('-1A'), 1000)
self.assertEqual(frequencies.get_freq_group('A-JAN'), 1000)
self.assertEqual(frequencies.get_freq_group('A-MAY'), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd()), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=1)), 1000)
self.assertEqual(frequencies.get_freq_group(offsets.YearEnd(month=5)), 1000)
self.assertEqual(frequencies.get_freq_group('W'), 4000)
self.assertEqual(frequencies.get_freq_group('W-MON'), 4000)
self.assertEqual(frequencies.get_freq_group('W-FRI'), 4000)
        self.assertEqual(frequencies.get_freq_group(offsets.Week()), 4000)
import pandas as pd
import numpy as np
import glob
import HP
from multiprocessing import Pool
def merge_assessment_score(df):
    new_df = pd.DataFrame()
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
cfs_tafd = 2.29568411*10**-5 * 86400 / 1000
def water_day(d):
return d - 274 if d >= 274 else d + 91
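# Quick sanity checks (added for illustration, not part of the original script): the
# water year starts on Oct 1, i.e. calendar day 274 of a non-leap year maps to day 0.
assert water_day(274) == 0 and water_day(365) == 91 and water_day(1) == 92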
df = pd.read_csv('../data/calfews_src-data.csv', index_col=0, parse_dates=True)
import glob
import os
import subprocess
import sys
import pandas as pd
FILES = (
"Youtube01-Psy.csv",
"Youtube02-KatyPerry.csv",
"Youtube03-LMFAO.csv",
"Youtube04-Eminem.csv",
"Youtube05-Shakira.csv",
)
DATA_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00380/YouTube-Spam-Collection-v1.zip"
DIRECTORY = "getting_started"
sys.path.insert(
0, os.path.split(os.path.dirname(__file__))[0]
) # so we can import from utils
from snorkle_example_utils.download_files import download_files
def load_unlabeled_spam_dataset():
"""Load spam training dataset without any labels."""
if os.path.basename(os.getcwd()) == "snorkel-tutorials":
os.chdir("getting_started")
download_files(FILES, DATA_URL, DIRECTORY)
filenames = sorted(glob.glob("data/Youtube*.csv"))
dfs = []
for i, filename in enumerate(filenames, start=1):
        df = pd.read_csv(filename)
###############
#
# Transform R to Python Copyright (c) 2019 <NAME> Released under the MIT license
#
###############
import os
import numpy as np
import pystan
import pandas
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
fish_num_climate_2 = pandas.read_csv('4-1-1-fish-num-2.csv')
print(fish_num_climate_2.head())
print(fish_num_climate_2.describe())
sns.scatterplot(
x='temperature',
y='fish_num',
hue='weather',
data=fish_num_climate_2
)
plt.show()
fish_num_climate_2_d = pandas.get_dummies(fish_num_climate_2, columns=["weather", "id"])
print(fish_num_climate_2_d.head())
fish_num = fish_num_climate_2_d['fish_num']
sample_num = len(fish_num)
sunny = fish_num_climate_2_d['weather_sunny']
temperature = fish_num_climate_2_d['temperature']
sunny_pred = [0, 1]
N_pred_s = len(sunny_pred)
temperature_pred = range(0,31)
N_pred_t = len(temperature_pred)
stan_data = {
'N': sample_num,
'fish_num': fish_num,
'sunny': sunny,
'temp': temperature,
'N_pred_s': N_pred_s,
'sunny_pred': sunny_pred,
'N_pred_t': N_pred_t,
'temp_pred': temperature_pred
}
if os.path.exists('4-1-2-poisson.pkl'):
sm = pickle.load(open('4-1-2-poisson.pkl', 'rb'))
else:
# a model using prior for mu and sigma.
sm = pystan.StanModel(file='4-1-2-poisson.stan')
# using seed=1856510770 to avoid values larger than 20log2, which is a restriction of the poisson_log function.
mcmc_result = sm.sampling(
data=stan_data,
chains=4,
seed=1856510770,
iter=2000,
warmup=1000,
thin=1
)
print(mcmc_result)
mcmc_result.plot()
plt.show()
print(mcmc_result.get_seed())
# saving compiled model
if not os.path.exists('4-1-2-poisson.pkl'):
with open('4-1-2-poisson.pkl', 'wb') as f:
pickle.dump(sm, f)
mcmc_sample = mcmc_result.extract()
fish_num_pred = mcmc_sample['fish_num_pred']
df_c = pandas.DataFrame(fish_num_pred[:, 0, :])
df_s = pandas.DataFrame(fish_num_pred[:, 1, :])
df_c.columns = temperature_pred
df_s.columns = temperature_pred
# visualization
qua = [0.025, 0.25, 0.50, 0.75, 0.975]
d_est = pandas.DataFrame()
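# Possible continuation (an assumption, not the original code): summarise the posterior
# predictive draws per temperature with the quantiles in `qua`, here for the cloudy case.
for q in qua:
    d_est[str(q)] = df_c.quantile(q)
d_est['temperature'] = temperature_pred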
import pandas as pd
import torch
import json
import os
import argparse
import random
import pickle
import numpy as np
import data
import data_config
import train_code_base
import train_adae
import train_code_adv
import train_coral
import train_dae
import train_vae
import train_ae
import train_code_mmd
import train_dsn
import train_dsna
import fine_tuning
from copy import deepcopy
def wrap_training_params(training_params, type='unlabeled'):
aux_dict = {k: v for k, v in training_params.items() if k not in ['unlabeled', 'labeled']}
aux_dict.update(**training_params[type])
return aux_dict
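# For illustration (not in the original script): wrap_training_params flattens the nested
# config, e.g. {'lr': 1e-3, 'unlabeled': {'batch_size': 64}, 'labeled': {'batch_size': 32}}
# becomes {'lr': 1e-3, 'batch_size': 64} when type='unlabeled'.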
def safe_make_dir(new_folder_name):
if not os.path.exists(new_folder_name):
os.makedirs(new_folder_name)
else:
print(new_folder_name, 'exists!')
def dict_to_str(d):
return "_".join(["_".join([k, str(v)]) for k, v in d.items()])
def main(args, drug):
if args.method == 'dsn':
train_fn = train_dsn.train_dsn
elif args.method == 'adae':
train_fn = train_adae.train_adae
elif args.method == 'coral':
train_fn = train_coral.train_coral
elif args.method == 'dae':
train_fn = train_dae.train_dae
elif args.method == 'vae':
train_fn = train_vae.train_vae
elif args.method == 'ae':
train_fn = train_ae.train_ae
elif args.method == 'code_mmd':
train_fn = train_code_mmd.train_code_mmd
elif args.method == 'code_base':
train_fn = train_code_base.train_code_base
elif args.method == 'dsna':
train_fn = train_dsna.train_dsna
else:
train_fn = train_code_adv.train_code_adv
device = 'cuda' if torch.cuda.is_available() else 'cpu'
gex_features_df = pd.read_csv(data_config.gex_feature_file, index_col=0)
test_df = gex_features_df.loc[gex_features_df.index.str.startswith('TCGA')]
#test_df.index = test_df.index.map(lambda x: x[:12])
#test_df = test_df.groupby(level=0).mean()
if not args.norm_flag:
method_save_folder = os.path.join('model_save', args.method)
else:
method_save_folder = os.path.join('model_save', f'{args.method}_norm')
with open(os.path.join(method_save_folder,f'train_params_{drug}.json'), 'r') as f:
training_params = json.load(f)
# with open(os.path.join(f'train_params.json'), 'r') as f:
# training_params = json.load(f)
params_dict = {}
if 'pretrain_num_epochs' in training_params['unlabeled']:
params_dict['pretrain_num_epochs'] = int(training_params['unlabeled']['pretrain_num_epochs'])
params_dict['train_num_epochs'] = int(training_params['unlabeled']['train_num_epochs'])
params_dict['dop'] = training_params['dop']
param_str = dict_to_str(params_dict)
training_params.update(
{
'device': device,
'input_dim': gex_features_df.shape[-1],
'model_save_folder': os.path.join(method_save_folder, param_str),
'es_flag': False,
'retrain_flag': args.retrain_flag,
'norm_flag': args.norm_flag
})
task_save_folder = os.path.join(f'{method_save_folder}', args.measurement, drug)
safe_make_dir(training_params['model_save_folder'])
safe_make_dir(task_save_folder)
random.seed(2020)
s_dataloaders, t_dataloaders = data.get_unlabeled_dataloaders(
gex_features_df=gex_features_df,
seed=2020,
batch_size=training_params['unlabeled']['batch_size']
)
# start unlabeled training
encoder, historys = train_fn(s_dataloaders=s_dataloaders,
t_dataloaders=t_dataloaders,
**wrap_training_params(training_params, type='unlabeled'))
if args.retrain_flag:
with open(os.path.join(training_params['model_save_folder'], f'unlabel_train_history.pickle'),
'wb') as f:
for history in historys:
pickle.dump(dict(history), f)
prediction_df = None
labeled_dataloader_generator = data.get_labeled_dataloader_generator(
gex_features_df=gex_features_df,
seed=2020,
batch_size=training_params['labeled']['batch_size'],
drug=drug,
ccle_measurement=args.measurement,
threshold=None,
days_threshold=None,
pdtc_flag=args.pdtc_flag,
n_splits=args.n)
fold_count = 0
for train_labeled_ccle_dataloader, test_labeled_ccle_dataloader, labeled_tcga_dataloader in labeled_dataloader_generator:
ft_encoder = deepcopy(encoder)
#print(train_labeled_ccle_dataloader.dataset.tensors[1].sum())
#print(test_labeled_ccle_dataloader.dataset.tensors[1].sum())
#print(labeled_tcga_dataloader.dataset.tensors[1].sum())
target_classifier, ft_historys, temp_df = fine_tuning.fine_tune_encoder(
encoder=ft_encoder,
train_dataloader=train_labeled_ccle_dataloader,
val_dataloader=test_labeled_ccle_dataloader,
test_dataloader=labeled_tcga_dataloader,
test_df=test_df,
seed=fold_count,
normalize_flag=args.norm_flag,
metric_name=args.metric,
task_save_folder=task_save_folder,
**wrap_training_params(training_params, type='labeled')
)
prediction_df = pd.concat([prediction_df, temp_df], axis=1)
prediction_df.to_csv(os.path.join(task_save_folder, 'tcga_predcition_cv.csv'), index_label='Sample')
fold_count += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser('ADSN training and evaluation')
parser.add_argument('--method', dest='method', nargs='?', default='adsn',
choices=['code_adv', 'dsn', 'dsna', 'code_base', 'code_mmd', 'adae', 'coral', 'dae', 'vae', 'ae'])
parser.add_argument('--metric', dest='metric', nargs='?', default='auroc', choices=['auroc', 'auprc'])
parser.add_argument('--measurement', dest='measurement', nargs='?', default='AUC', choices=['AUC', 'LN_IC50'])
parser.add_argument('--a_thres', dest='a_thres', nargs='?', type=float, default=None)
parser.add_argument('--d_thres', dest='days_thres', nargs='?', type=float, default=None)
parser.add_argument('--n', dest='n', nargs='?', type=int, default=5)
train_group = parser.add_mutually_exclusive_group(required=False)
train_group.add_argument('--train', dest='retrain_flag', action='store_true')
train_group.add_argument('--no-train', dest='retrain_flag', action='store_false')
parser.set_defaults(retrain_flag=False)
train_group.add_argument('--pdtc', dest='pdtc_flag', action='store_true')
train_group.add_argument('--no-pdtc', dest='pdtc_flag', action='store_false')
parser.set_defaults(pdtc_flag=False)
norm_group = parser.add_mutually_exclusive_group(required=False)
norm_group.add_argument('--norm', dest='norm_flag', action='store_true')
norm_group.add_argument('--no-norm', dest='norm_flag', action='store_false')
parser.set_defaults(norm_flag=False)
args = parser.parse_args()
if args.pdtc_flag:
        drug_list = pd.read_csv(data_config.gdsc_pdtc_drug_name_mapping_file, index_col=0)
"""
Modify the tips example to make it robust to outliers. Try with one shared ν for
all groups and also with one ν per group. Run posterior predictive checks to
assess these three models.
"""
import pandas as pd
import random
from pandas import DataFrame, Series
import matplotlib.pyplot as plt
import arviz as az
import pymc3 as pm
import numpy as np
tips = pd.read_csv('../data/tips.csv')
tip = tips['tip'].values
idx = pd.Categorical(tips['day'], categories=['Thur', 'Fri', 'Sat', 'Sun'])
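# Illustrative sketch (an assumption, not the original solution): a robust model using a
# Student-t likelihood with a single shared nu; `idx.codes` gives each observation's
# integer group index and `len(idx.categories)` the number of day groups.
groups = len(idx.categories)
with pm.Model() as model_t_shared:
    mu = pm.Normal('mu', mu=0, sd=10, shape=groups)
    sigma = pm.HalfNormal('sigma', sd=10, shape=groups)
    nu = pm.Exponential('nu', 1/30)
    y = pm.StudentT('y', mu=mu[idx.codes], sd=sigma[idx.codes], nu=nu, observed=tip)
    trace_t_shared = pm.sample(1000)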
# License: Apache-2.0
from gators.encoders.target_encoder import TargetEncoder
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
    y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import numpy as np
import pandas as pd
from six import with_metaclass
from zipline.data._resample import (
_minute_to_session_open,
_minute_to_session_high,
_minute_to_session_low,
_minute_to_session_close,
_minute_to_session_volume,
)
from zipline.data.minute_bars import MinuteBarReader
from zipline.data.session_bars import SessionBarReader
from zipline.utils.memoize import lazyval
_MINUTE_TO_SESSION_OHCLV_HOW = OrderedDict((
('open', 'first'),
('high', 'max'),
('low', 'min'),
('close', 'last'),
('volume', 'sum'),
))
def minute_frame_to_session_frame(minute_frame, calendar):
"""
Resample a DataFrame with minute data into the frame expected by a
BcolzDailyBarWriter.
Parameters
----------
minute_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `dt` (minute dts)
calendar : zipline.utils.calendars.trading_calendar.TradingCalendar
A TradingCalendar on which session labels to resample from minute
to session.
Return
------
session_frame : pd.DataFrame
A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`,
and `day` (datetime-like).
"""
how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
for c in minute_frame.columns)
return minute_frame.groupby(calendar.minute_to_session_label).agg(how)
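# Example usage sketch (added for illustration; the calendar lookup below is an
# assumption, not part of this module):
#
#   from zipline.utils.calendars import get_calendar
#   session_frame = minute_frame_to_session_frame(minute_frame, get_calendar('NYSE'))
#
# where ``minute_frame`` has one row per trading minute; each session's row becomes the
# first open, max high, min low, last close and summed volume of its minutes.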
def minute_to_session(column, close_locs, data, out):
"""
Resample an array with minute data into an array with session data.
This function assumes that the minute data is the exact length of all
minutes in the sessions in the output.
Parameters
----------
column : str
The `open`, `high`, `low`, `close`, or `volume` column.
close_locs : array[intp]
The locations in `data` which are the market close minutes.
data : array[float64|uint32]
The minute data to be sampled into session data.
The first value should align with the market open of the first session,
        containing values for all minutes for all sessions, with the last value
        being the market close of the last session.
out : array[float64|uint32]
The output array into which to write the sampled sessions.
"""
if column == 'open':
_minute_to_session_open(close_locs, data, out)
elif column == 'high':
_minute_to_session_high(close_locs, data, out)
elif column == 'low':
_minute_to_session_low(close_locs, data, out)
elif column == 'close':
_minute_to_session_close(close_locs, data, out)
elif column == 'volume':
_minute_to_session_volume(close_locs, data, out)
return out
class DailyHistoryAggregator(object):
"""
Converts minute pricing data into a daily summary, to be used for the
last slot in a call to history with a frequency of `1d`.
This summary is the same as a daily bar rollup of minute data, with the
distinction that the summary is truncated to the `dt` requested.
    i.e. the aggregation slides forward during the course of the simulation day.
Provides aggregation for `open`, `high`, `low`, `close`, and `volume`.
    The aggregation rules for each price type are documented in their respective methods.
"""
def __init__(self, market_opens, minute_reader, trading_calendar):
self._market_opens = market_opens
self._minute_reader = minute_reader
self._trading_calendar = trading_calendar
# The caches are structured as (date, market_open, entries), where
# entries is a dict of asset -> (last_visited_dt, value)
#
# Whenever an aggregation method determines the current value,
# the entry for the respective asset should be overwritten with a new
# entry for the current dt.value (int) and aggregation value.
#
# When the requested dt's date is different from date the cache is
# flushed, so that the cache entries do not grow unbounded.
#
# Example cache:
# cache = (date(2016, 3, 17),
# pd.Timestamp('2016-03-17 13:31', tz='UTC'),
# {
# 1: (1458221460000000000, np.nan),
# 2: (1458221460000000000, 42.0),
# })
self._caches = {
'open': None,
'high': None,
'low': None,
'close': None,
'volume': None
}
# The int value is used for deltas to avoid extra computation from
# creating new Timestamps.
self._one_min = pd.Timedelta('1 min').value
def _prelude(self, dt, field):
session = self._trading_calendar.minute_to_session_label(dt)
dt_value = dt.value
cache = self._caches[field]
if cache is None or cache[0] != session:
market_open = self._market_opens.loc[session]
cache = self._caches[field] = (session, market_open, {})
_, market_open, entries = cache
market_open = market_open.tz_localize('UTC')
if dt != market_open:
prev_dt = dt_value - self._one_min
else:
prev_dt = None
return market_open, prev_dt, dt_value, entries
def opens(self, assets, dt):
"""
The open field's aggregation returns the first value that occurs
for the day, if there has been no data on or before the `dt` the open
is `nan`.
Once the first non-nan open is seen, that value remains constant per
asset for the remainder of the day.
Returns
-------
np.array with dtype=float64, in order of assets parameter.
"""
market_open, prev_dt, dt_value, entries = self._prelude(dt, 'open')
opens = []
session_label = self._trading_calendar.minute_to_session_label(dt)
for asset in assets:
if not asset.is_alive_for_session(session_label):
opens.append(np.NaN)
continue
if prev_dt is None:
val = self._minute_reader.get_value(asset, dt, 'open')
entries[asset] = (dt_value, val)
opens.append(val)
continue
else:
try:
last_visited_dt, first_open = entries[asset]
if last_visited_dt == dt_value:
opens.append(first_open)
continue
                    elif not pd.isnull(first_open):
#############################################################
# ActivitySim verification against TM1
# <NAME>, <EMAIL>, 02/22/19
# C:\projects\activitysim\verification>python compare_results.py
#############################################################
import pandas as pd
import openmatrix as omx
#############################################################
# INPUTS
#############################################################
pipeline_filename = 'asim/pipeline.h5'
distance_matrix_filename = "asim/skims.omx"
asim_nmtf_alts_filename = "asim/non_mandatory_tour_frequency_alternatives.csv"
process_sp = True # False skip work/sch shadow pricing comparisons, True do them
process_tm1 = True # False only processes asim, True processes tm1 as well
asim_sp_work_filename = "asim/shadow_price_workplace_modeled_size_10.csv"
asim_sp_school_filename = "asim/shadow_price_school_modeled_size_10.csv"
asim_sp_school_no_sp_filename = "asim/shadow_price_school_modeled_size_1.csv"
tm1_access_filename = "tm1/accessibility.csv"
tm1_sp_filename = "tm1/ShadowPricing_9.csv"
tm1_work_filename = "tm1/wsLocResults_1.csv"
tm1_ao_filename = "tm1/aoResults.csv"
tm1_hh_filename = "tm1/householdData_1.csv"
tm1_cdap_filename = "tm1/cdapResults.csv"
tm1_per_filename = "tm1/personData_1.csv"
tm1_tour_filename = "tm1/indivTourData_1.csv"
tm1_jtour_filename = "tm1/jointTourData_1.csv"
tm1_trips_filename = "tm1/indivTripData_1.csv"
tm1_jtrips_filename = "tm1/jointTripData_1.csv"
#############################################################
# OUTPUT FILES FOR DEBUGGING
#############################################################
asim_zones_filename = "asim/asim_zones.csv"
asim_access_filename = "asim/asim_access.csv"
asim_per_filename = "asim/asim_per.csv"
asim_hh_filename = "asim/asim_hh.csv"
asim_tour_filename = "asim/asim_tours.csv"
asim_trips_filename = "asim/asim_trips.csv"
#############################################################
# COMMON LABELS
#############################################################
ptypes = ["", "Full-time worker", "Part-time worker", "University student", "Non-worker",
"Retired", "Student of driving age", "Student of non-driving age",
"Child too young for school"]
mode_labels = ["", "DRIVEALONEFREE", "DRIVEALONEPAY", "SHARED2FREE", "SHARED2PAY", "SHARED3FREE",
"SHARED3PAY", "WALK", "BIKE", "WALK_LOC", "WALK_LRF", "WALK_EXP", "WALK_HVY",
"WALK_COM", "DRIVE_LOC", "DRIVE_LRF", "DRIVE_EXP", "DRIVE_HVY", "DRIVE_COM"]
#############################################################
# DISTANCE SKIM
#############################################################
# read distance matrix (DIST)
distmat = omx.open_file(distance_matrix_filename)["DIST"][:]
#############################################################
# EXPORT TABLES
#############################################################
# write tables for verification
tazs = pd.read_hdf(pipeline_filename, "land_use/initialize_landuse")
tazs["zone"] = tazs.index
tazs.to_csv(asim_zones_filename, index=False)
access = pd.read_hdf(pipeline_filename, "accessibility/compute_accessibility")
access.to_csv(asim_access_filename, index=False)
hh = pd.read_hdf(pipeline_filename, "households/joint_tour_frequency")
hh["household_id"] = hh.index
hh.to_csv(asim_hh_filename, index=False)
per = pd.read_hdf(pipeline_filename, "persons/non_mandatory_tour_frequency")
per["person_id"] = per.index
per.to_csv(asim_per_filename, index=False)
tours = pd.read_hdf(pipeline_filename, "tours/stop_frequency")
tours["tour_id"] = tours.index
tours.to_csv(asim_tour_filename, index=False)
trips = pd.read_hdf(pipeline_filename, "trips/trip_mode_choice")
trips["trip_id"] = trips.index
trips.to_csv(asim_trips_filename, index=False)
#############################################################
# AGGREGATE
#############################################################
# accessibilities
if process_tm1:
tm1_access = pd.read_csv(tm1_access_filename)
tm1_access.to_csv("outputs/tm1_access.csv", na_rep=0)
asim_access = pd.read_csv(asim_access_filename)
asim_access.to_csv("outputs/asim_access.csv", na_rep=0)
#############################################################
# HOUSEHOLD AND PERSON
#############################################################
# work and school location
if process_sp:
if process_tm1:
tm1_markets = ["work_low", "work_med", "work_high", "work_high", "work_very high", "university",
"school_high", "school_grade"]
tm1 = pd.read_csv(tm1_sp_filename)
tm1 = tm1.groupby(tm1["zone"]).sum()
tm1["zone"] = tm1.index
tm1 = tm1.loc[tm1["zone"] > 0]
ws_size = tm1[["zone"]]
for i in range(len(tm1_markets)):
ws_size[tm1_markets[i] + "_modeledDests"] = tm1[tm1_markets[i] + "_modeledDests"]
ws_size.to_csv("outputs/tm1_work_school_location.csv", na_rep=0)
asim_markets = ["work_low", "work_med", "work_high", "work_high", "work_veryhigh", "university",
"highschool", "gradeschool"]
asim = pd.read_csv(asim_sp_work_filename)
asim_sch = pd.read_csv(asim_sp_school_filename)
asim_sch_no_sp = pd.read_csv(asim_sp_school_no_sp_filename)
asim_sch["gradeschool"] = asim_sch_no_sp["gradeschool"] # grade school not shadow priced
asim = asim.set_index("TAZ", drop=False)
asim_sch = asim_sch.set_index("TAZ", drop=False)
asim["gradeschool"] = asim_sch["gradeschool"].loc[asim["TAZ"]].tolist()
asim["highschool"] = asim_sch["highschool"].loc[asim["TAZ"]].tolist()
asim["university"] = asim_sch["university"].loc[asim["TAZ"]].tolist()
ws_size = asim[["TAZ"]]
for i in range(len(asim_markets)):
ws_size[asim_markets[i] + "_asim"] = asim[asim_markets[i]]
ws_size.to_csv("outputs/asim_work_school_location.csv", na_rep=0)
# work county to county flows
tazs = pd.read_csv(asim_zones_filename)
counties = ["", "SF", "SM", "SC", "ALA", "CC", "SOL", "NAP", "SON", "MAR"]
tazs["COUNTYNAME"] = pd.Series(counties)[tazs["county_id"].tolist()].tolist()
tazs = tazs.set_index("zone", drop=False)
if process_tm1:
tm1_work = pd.read_csv(tm1_work_filename)
tm1_work["HomeCounty"] = tazs["COUNTYNAME"].loc[tm1_work["HomeTAZ"]].tolist()
tm1_work["WorkCounty"] = tazs["COUNTYNAME"].loc[tm1_work["WorkLocation"]].tolist()
tm1_work_counties = tm1_work.groupby(["HomeCounty", "WorkCounty"]).count()["HHID"]
tm1_work_counties = tm1_work_counties.reset_index()
tm1_work_counties = tm1_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
tm1_work_counties.to_csv("outputs/tm1_work_counties.csv", na_rep=0)
asim_cdap = pd.read_csv(asim_per_filename)
asim_cdap["HomeCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["home_taz"]].tolist()
asim_cdap["WorkCounty"] = tazs["COUNTYNAME"].loc[asim_cdap["workplace_zone_id"]].tolist()
asim_work_counties = asim_cdap.groupby(["HomeCounty", "WorkCounty"]).count()["household_id"]
asim_work_counties = asim_work_counties.reset_index()
asim_work_counties = asim_work_counties.pivot(index="HomeCounty", columns="WorkCounty")
asim_work_counties.to_csv("outputs/asim_work_counties.csv", na_rep=0)
# auto ownership - count of hhs by num autos by taz
if process_tm1:
tm1_ao = pd.read_csv(tm1_ao_filename)
tm1_hh = pd.read_csv(tm1_hh_filename)
tm1_ao = tm1_ao.set_index("HHID", drop=False)
tm1_hh["ao"] = tm1_ao["AO"].loc[tm1_hh["hh_id"]].tolist()
tm1_autos = tm1_hh.groupby(["taz", "ao"]).count()["hh_id"]
tm1_autos = tm1_autos.reset_index()
tm1_autos = tm1_autos.pivot(index="taz", columns="ao")
tm1_autos.to_csv("outputs/tm1_autos.csv", na_rep=0)
asim_ao = pd.read_csv(asim_hh_filename)
asim_autos = asim_ao.groupby(["TAZ", "auto_ownership"]).count()["SERIALNO"]
asim_autos = asim_autos.reset_index()
asim_autos = asim_autos.pivot(index="TAZ", columns="auto_ownership")
asim_autos.to_csv("outputs/asim_autos.csv", na_rep=0)
# cdap - ptype count and ptype by M,N,H
if process_tm1:
    tm1_cdap = pd.read_csv(tm1_cdap_filename)
#!//Users/Jackson/miniconda3/bin/python
'''
Created by: <NAME>
Date: 5/15/18
'''
import numpy as np
import pandas as pd
import glob
import os
np.set_printoptions(suppress=True)
import sys
import os
# %%
# ==============================================================================
# // TITLE
# ==============================================================================
# input1 = './m1v2/YMR186W-SRR1520311-Galaxy53-Rm_rRNA_on_data_17--norc-m1-v2-p4.csv'
# input2 = './m1v2/YPL240C-SRR1520311-Galaxy53-Rm_rRNA_on_data_17--norc-m1-v2-p4.csv'
# base = os.path.basename(Input_name)
# basenoext = os.path.splitext(base)[0]
input1 = str(sys.argv[1])
input2 = str(sys.argv[2])
input3 = str(sys.argv[3])
# %%
# ==============================================================================
# // input and merge along alignment_position
# ==============================================================================
df1 = pd.read_csv(input1)
import os
import sys
import pandas
os.makedirs("./filelists_incucyte/", exist_ok=True)
ROOT_FOLDERNAME_WITHIN_SUBMISSION_SYSTEM = ""
#FILENAME = "20210920 Overview CG plates and compounds.xlsx"
#FILENAME = "20220121 Overview CG plates and compounds _consolidated RTG.xlsx"
FILENAME = sys.argv[1]
df_batches = pandas.read_excel(FILENAME, sheet_name="compound batches")
df_compounds = pandas.read_excel(FILENAME, sheet_name="compounds")
df_identifier = df_batches.merge(df_compounds, on="compound ID", how="left", validate="m:1")
df_experiments = pandas.read_excel(FILENAME, sheet_name="experiments")
## do only cv
df_experiments = df_experiments[df_experiments["experiment ID"].str.contains("cv")]
## store expanded compound maps
print("expanding the compound maps...")
compound_map_dict = {}
for see, _ in df_experiments.groupby("compound map see corresponding excel table"):
print(f"Checking the compound map '{see}'...")
df_compound_map = pandas.read_excel(FILENAME, sheet_name=f"compound map {see}")
## expand with lookup-ed ID
for i, s in df_compound_map.iterrows():
#print(i)
#print(s.compound_name)
column_name_for_identification = "compound batch ID"
if pandas.isna(s[column_name_for_identification]):
continue
result = df_identifier.query("`compound batch ID` == '{compound_name}'".format(compound_name= s[column_name_for_identification]))
if type(s[column_name_for_identification]) == int:
result = df_identifier.query("`compound batch ID` == {compound_name}".format(compound_name= s[column_name_for_identification]))
#print(result)
#assert len(result) == 1, (s, result)
if len(result) == 1:
#print(dff.loc[i])
for col in result.columns:
df_compound_map.loc[i, col] = result.squeeze()[col]
else:
print("ERROR: couldn't lookup the compound name '{compound_name}'".format(compound_name= s[column_name_for_identification]))
compound_map_dict.update( {see: df_compound_map})
df_imagings = pandas.read_excel(FILENAME, sheet_name="imaging campaigns")
df_exclude = pandas.read_excel(FILENAME, sheet_name="exclude from file list")
## do only cv
df_imagings = df_imagings[df_imagings["experiment ID"].str.contains("cv")]
df_imagings = df_imagings.merge(df_experiments, on="experiment ID")
df_collector_all = []
for groupname, groupentries in df_imagings.groupby("experiment ID"):
print(groupname)
print("processing the imagings...")
df_collector_one_experiment = []
for i, s in groupentries.iterrows():
        assert not pandas.isna(s["processed images available in folder"])
import unittest
from unittest.mock import Mock
import random
from automl.feature.generators import SklearnFeatureGenerator, FormulaFeatureGenerator, \
Preprocessing, PolynomialGenerator, PolynomialFeatureGenerator
from automl.pipeline import PipelineContext, PipelineData, Pipeline, LocalExecutor
from automl.data.dataset import Dataset
from automl.model import Validate, ModelSpace, ChooseBest
from automl.feature.selector import FeatureSelector
from sklearn import datasets
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Ridge, Lasso
from sklearn.metrics import mean_squared_error
import numpy as np
import pandas as pd
class TestSklearnFeatureGenerator(unittest.TestCase):
def test_call_generator(self):
Transformer = Mock()
Transformer.fit_transform.return_value = []
df = pd.DataFrame([[1, 2], [3, 4]])
X = PipelineData(Dataset(df, None))
context = PipelineContext()
transformer = lambda *args, **kwargs: Transformer
gen = SklearnFeatureGenerator(transformer)
gen(X, context)
Transformer.fit_transform.assert_called()
self.assertTrue((Transformer.fit_transform.call_args[0][0] == df.as_matrix()).all())
def test_generate_polynomial_features_kwargs(self):
Transformer = Mock()
kwargs = {'degree': 3}
transformer = lambda *args, **kwargs: Transformer(*args, **kwargs)
gen = SklearnFeatureGenerator(transformer, **kwargs)
Transformer.assert_called_with(**kwargs)
def test_generate_formula_feature(self):
features = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        df = pd.DataFrame(features)
# coding: utf8
from uuid import uuid4
from collections import Counter, deque
# noinspection PyPackageRequirements
from numpy import array
from pandas import DataFrame
# noinspection PyPackageRequirements
import pytest
from dfqueue import assign_dataframe, get_info_provider
def test_queue_info_provider():
dataframe = DataFrame(array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]),
index=['a1', 'a2', 'a3'], columns=['A', 'B', 'C', 'D'])
queue_name = str(uuid4())
max_size = 4
assign_dataframe(dataframe, max_size, selected_columns=['A', 'D'], queue_name=queue_name)
provider = get_info_provider(queue_name)
assert id(provider.assigned_dataframe) == id(dataframe)
assert provider.max_size == max_size
assert provider.queue_name == queue_name
assert provider.is_default_queue is False
assert provider.queue[0:2] == (('a1', {'A': 1, 'D': 4}), ('a2', {'A': 5, 'D': 8}))
assert provider.queue[-1] == ('a3', {'A': 9, 'D': 12})
assert provider.queue[0] == ('a1', {'A': 1, 'D': 4})
assert provider.queue == deque((('a1', {'A': 1, 'D': 4}), ('a2', {'A': 5, 'D': 8}),
('a3', {'A': 9, 'D': 12})))
assert provider.queue != deque((('a1', {'A': 1, 'D': 4}),))
assert ('a1', {'A': 1, 'D': 4}) in provider.queue
assert ('a5', {'A': 10, 'D': 40}) not in provider.queue
assert provider.counter['a1'] == Counter({frozenset(['A', 'D']): 1})
assert provider.counter['a1'] != Counter({frozenset(['A', 'D']): 10})
assert provider.counter == {'a1': Counter({frozenset(['A', 'D']): 1}),
'a2': Counter({frozenset(['A', 'D']): 1}),
'a3': Counter({frozenset(['A', 'D']): 1})}
assert provider.counter != {'a1': Counter({frozenset(['A', 'D']): 10}),
'a2': Counter({frozenset(['A', 'D']): 10}),
'a3': Counter({frozenset(['A', 'D']): 10})}
assert set(provider.counter.keys()) == set(['a1', 'a2', 'a3'])
assert set(provider.counter.keys()) != set(['a4'])
assert list(provider.counter.values()) == [Counter({frozenset(['A', 'D']): 1}),
Counter({frozenset(['A', 'D']): 1}),
Counter({frozenset(['A', 'D']): 1})]
assert list(provider.counter.values()) != [Counter({frozenset(['A', 'D']): 10}),
Counter({frozenset(['A', 'D']): 10}),
Counter({frozenset(['A', 'D']): 10})]
assert list(provider.counter.items()) == [('a1', Counter({frozenset(['A', 'D']): 1})),
('a2', Counter({frozenset(['A', 'D']): 1})),
('a3', Counter({frozenset(['A', 'D']): 1}))]
assert list(provider.counter.items()) != [('a4', Counter({frozenset(['A', 'D']): 10})),
('a5', Counter({frozenset(['A', 'D']): 10})),
('a6', Counter({frozenset(['A', 'D']): 10}))]
dataframe_2 = DataFrame(array([[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]),
index=['a1', 'a2', 'a3'], columns=['A', 'B', 'C', 'D'])
max_size_2 = 100
assign_dataframe(dataframe_2, max_size_2, selected_columns=['B'], queue_name=queue_name)
assert id(provider.assigned_dataframe) == id(dataframe_2)
assert provider.max_size == max_size_2
assert provider.queue_name == queue_name
assert provider.is_default_queue is False
assert provider.queue[0:2] == (('a1', {'B': 20}), ('a2', {'B': 60}))
assert provider.queue[-1] == ('a3', {'B': 100})
assert provider.queue[0] == ('a1', {'B': 20})
assert provider.queue == deque((('a1', {'B': 20}), ('a2', {'B': 60}), ('a3', {'B': 100})))
assert provider.queue != deque((('a1', {'B': 20}),))
assert ('a1', {'B': 20}) in provider.queue
assert ('a5', {'B': 200}) not in provider.queue
def test_queue_info_provider_default_queue():
provider = get_info_provider()
assert provider.is_default_queue is True
def test_queue_info_provider_error():
provider = get_info_provider()
with pytest.raises(ValueError):
provider.queue['a']
with pytest.raises(AttributeError):
provider.is_default_queue = False
with pytest.raises(AttributeError):
provider.queue_name = 'Another Name'
with pytest.raises(AttributeError):
provider.max_size = 2
with pytest.raises(AttributeError):
        provider.assigned_dataframe = DataFrame()
from pytablewriter import RstGridTableWriter, MarkdownTableWriter
import numpy as np
import pandas as pd
from dgl import DGLGraph
from dgl.data.gnn_benckmark import AmazonCoBuy, CoraFull, Coauthor
from dgl.data.karate import KarateClub
from dgl.data.gindt import GINDataset
from dgl.data.bitcoinotc import BitcoinOTC
from dgl.data.gdelt import GDELT
from dgl.data.icews18 import ICEWS18
from dgl.data.qm7b import QM7b
# from dgl.data.qm9 import QM9
from dgl.data import CitationGraphDataset, CoraDataset, PPIDataset, RedditDataset, TUDataset
ds_list = {
"BitcoinOTC": "BitcoinOTC()",
"Cora": "CoraDataset()",
"Citeseer": "CitationGraphDataset('citeseer')",
"PubMed": "CitationGraphDataset('pubmed')",
"QM7b": "QM7b()",
"Reddit": "RedditDataset()",
"ENZYMES": "TUDataset('ENZYMES')",
"DD": "TUDataset('DD')",
"COLLAB": "TUDataset('COLLAB')",
"MUTAG": "TUDataset('MUTAG')",
"PROTEINS": "TUDataset('PROTEINS')",
"PPI": "PPIDataset('train')/PPIDataset('valid')/PPIDataset('test')",
# "Cora Binary": "CitationGraphDataset('cora_binary')",
"KarateClub": "KarateClub()",
"Amazon computer": "AmazonCoBuy('computers')",
"Amazon photo": "AmazonCoBuy('photo')",
"Coauthor cs": "Coauthor('cs')",
"Coauthor physics": "Coauthor('physics')",
"GDELT": "GDELT('train')/GDELT('valid')/GDELT('test')",
"ICEWS18": "ICEWS18('train')/ICEWS18('valid')/ICEWS18('test')",
"CoraFull": "CoraFull()",
}
writer = RstGridTableWriter()
# writer = MarkdownTableWriter()
extract_graph = lambda g: g if isinstance(g, DGLGraph) else g[0]
stat_list=[]
for k,v in ds_list.items():
print(k, ' ', v)
ds = eval(v.split("/")[0])
num_nodes = []
num_edges = []
for i in range(len(ds)):
g = extract_graph(ds[i])
num_nodes.append(g.number_of_nodes())
num_edges.append(g.number_of_edges())
gg = extract_graph(ds[0])
dd = {
"Datset Name": k,
"Usage": v,
"# of graphs": len(ds),
"Avg. # of nodes": np.mean(num_nodes),
"Avg. # of edges": np.mean(num_edges),
"Node field": ', '.join(list(gg.ndata.keys())),
"Edge field": ', '.join(list(gg.edata.keys())),
# "Graph field": ', '.join(ds[0][0].gdata.keys()) if hasattr(ds[0][0], "gdata") else "",
"Temporal": hasattr(ds, "is_temporal")
}
stat_list.append(dd)
print(dd.keys())
df = pd.DataFrame(stat_list)
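# Possible continuation (an assumption, not the original script): render the collected
# statistics with the table writer created above.
writer.from_dataframe(df)
writer.write_table()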
import numpy as np
import pandas as pd
from random import randint
from statistics import mode
from datetime import datetime
import backend.utils.finder as finder
from dateutil.relativedelta import relativedelta
def arrange_df(df, df_type, relevant_col_idx=None, items_to_delete=None, assembly_df=None, bom_trim=False):
"""
:param bom_trim:
:param df:
pandas.DataFrame object that contains the raw format that is read from the file.
:param df_type:
File type of
:param relevant_col_idx:
:param items_to_delete:
:param assembly_df:
:return:
"""
df = df.copy()
if df_type.lower() == "bom":
# Reformatting the columns
df = reformat_columns(df, relevant_col_idx, "bom")
df.part_no = df.part_no.astype(str)
# If specified, bom will be trimmed
if bom_trim:
df = trim_bom(df)
# This part will be discarded for the time being, 15.04.2020
# Deleting the trial products
# df.drop(df[df.product_no.str.split(".", 0).apply(lambda x: int(x[2]) > 900)].index, inplace = True)
# Deleting the entries where two successive entries are level 1s.
df.drop(df[df.level.eq(df.level.shift(-1, fill_value=1)) & df.level.eq(1)].index, inplace=True)
        # These to-be-deleted parts can be redundant, so it will be decided later whether these codes are going to stay or not
tbd_list = items_to_delete["Silinecekler"].unique().tolist()
df.drop(df[df["part_no"].str.split(".").apply(lambda x: x[0] in tbd_list)].index, inplace=True)
# Deleting the entries where two successive entries are level 1s.
df.drop(df[df.level.eq(df.level.shift(-1, fill_value=1)) & df.level.eq(1)].index, inplace=True)
# Check if the product structure is okay or not, if not okay, delete the corresponding products from the BOM
df.drop(df[df.groupby("product_no").apply(corrupt_product_bom).values].index, inplace=True)
# Transforming the amounts to a desired format for the simulation model.
df.amount = determine_amounts(df)
# Making sure that the dataframe returns in order
df.reset_index(drop=True, inplace=True)
return df
if df_type.lower() == "times":
# Reformatting the columns
df = reformat_columns(df, relevant_col_idx, "times")
# Transforming the machine names to ASCII characters.
df["station"] = format_machine_names(df, "station")
# Transforming non-numeric values to numeric values
df.cycle_times = pd.to_numeric(df.cycle_times, errors="coerce").fillna(0)
df.setup_times = pd.to_numeric(df.setup_times, errors="coerce").fillna(0)
# Grouping by the times of the parts that has multiple times in the same work station
df = df.groupby(["part_no", "station"], as_index=False).agg({"cycle_times": sum, "setup_times": max})
df.drop(df[df["part_no"].duplicated(keep="last")].index, inplace=True)
# Creating the setup matrix
set_list_df = df[["station", "setup_times"]].copy()
set_list_df.columns = ["stations_list", "setup_time"]
set_list_df = set_list_df.groupby(by="stations_list", as_index=False).agg({"setup_time": mode})
set_list_df["setup_prob"] = 1
set_list_df.loc[(set_list_df.stations_list == "ANKASTRE_BOYAHANE") |
(set_list_df.stations_list == "ENDUSTRI_BOYAHANE"), "setup_prob"] = 3 / 100
set_list_df.loc[set_list_df.stations_list == "ANKASTRE_BOYAHANE", "setup_time"] = 900
set_list_df.loc[set_list_df.stations_list == "ENDUSTRI_BOYAHANE", "setup_time"] = 1200
# Creating a dataframe with the assembly times
montaj_df = df[(df["station"] == "BANT") | (df["station"] == "LOOP")]
# Creating a dataframe with the glass bonding
cmy_df = df[df["station"] == "CAM_YAPISTIRMA"]
# Dropping the assembly times from the original times dataframe and resetting the index
df.drop(df[(df["station"] == "BANT") |
(df["station"] == "LOOP") |
(df["station"] == "CAM_YAPISTIRMA") |
(df["part_no"].apply(lambda x: len(x)) == 13)].index, inplace=True)
# Resetting the index
df.reset_index(drop="index", inplace=True)
# Getting rid of the setup column of time matrix
# df.drop("setup_times", axis = 1, inplace = True)
return df, montaj_df, cmy_df, set_list_df
if df_type.lower() == "merged":
df["station"] = level_lookup(df, "level", "station")
df["cycle_times"] = level_lookup(df, "level", "cycle_times")
df.loc[df.level == 1, ["station", "cycle_times"]] = \
pd.merge(df["product_no"], assembly_df[["part_no", "station", "cycle_times"]], "left",
left_on="product_no",
right_on="part_no")[["station", "cycle_times"]]
missing_dict = missing_values_df(df)
        missing_df = pd.DataFrame(missing_dict)
from distutils.dir_util import copy_tree
from bids_neuropoly import bids
import pandas as pd
import json
import glob
import os
import argparse
import sys
# This scripts merges 2 BIDS datasets.
# the new participants.tsv and participants.json are merged versions of the initial files.
# 2 Inputs should be added:
# 1. --ifolders: list of the 2 Folders to be merged
# 2. --ofolder: output folder
# Example call:
# python3 merge_BIDS_datasets.py --ifolders ~/first_Dataset/ ~/second_Dataset/ --ofolder ~/mergedDataset/
# <NAME> 2020
# -----------------------------------------------------------------------------------------------------------------------#
def main_run(argv):
CLI = argparse.ArgumentParser()
CLI.add_argument(
"--ifolders",
nargs=2, # 2 folders expected to be merged
type=str,
default=[], # default if nothing is provided - This should give an error later on
)
CLI.add_argument(
"--ofolder", # name on the CLI - drop the `--` for positional/required parameters
nargs=1, # 1 folder expected
type=str,
default=[], # default if nothing is provided
)
# parse the command line
args = CLI.parse_args()
# access CLI options
print("Input folders: %r" % args.ifolders)
print("Output folder: %r" % args.ofolder)
datasetFolder1 = args.ifolders[0]
datasetFolder2 = args.ifolders[1]
output_folder = args.ofolder[0]
print('Make sure there were no inconsistencies in column labels between the two initial participants.tsv files - e.g. subject_id - subject_ids etc.')
    # Create output folder if it doesn't exist
if not os.path.exists(output_folder):
os.mkdir(output_folder)
#------------------------------------------------------------------------------------------------------------------------#
# GENERALIZE TO MORE THAN TWO DATASETS
# Load the .tsv files
df1 = bids.BIDS(datasetFolder1).participants.content
df2 = bids.BIDS(datasetFolder2).participants.content
# This gets rid of potential problematic merging of different Types within the same column
df1 = df1.astype(str)
df2 = df2.astype(str)
# Merge the .tsv files and save them in a new file (This keeps also non-overlapping fields)
df_merged = | pd.merge(left=df1, right=df2, how='outer') | pandas.merge |
from tree import Genetreec as gentree
from copy import deepcopy
import tagger
import indicator
import pandas as pd
import backtrader as bt
import math
import numpy as np
from pandas_datareader import data as pdr
import time
class TreeStrategy(bt.Strategy):
params=(('tree', None),)
sellcount = 0
def __init__(self):
self.dataclose = self.datas[0].close
        # Keep track of pending (not yet executed) orders
self.order = None
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
return
self.order = None
return
def next(self):
        # If there is a pending trade, do not place another one
if self.order:
return
action = self.params.tree.evaluate(date=self.datas[0].datetime.date(0))
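        # The evolved decision tree returns an action label for the current bar (e.g. 'Buy' or 'Sell')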
if action == 'Buy':
if self.position.size == 0:
self.order = self.buy(size = math.floor(self.broker.get_cash()/(self.datas[0].close*1.01)) )
                ## Since we use a 1% commission, shares are effectively 1% more expensive.
                ## The number of shares we can buy is the integer part of our cash
                ## divided by the share price plus its commission.
if action == 'Sell':
if self.position.size > 0:
self.order = self.sell(size=self.position.size)
self.sellcount += 1
def stop(self):
self.params.tree.sellcount = self.sellcount
return
#############################################
#### COPY OF TreeStrategy --- Made for plot
#############################################
class plotTreeStrategy(bt.Strategy):
params=(('tree', None),)
def __init__(self):
self.dataclose = self.datas[0].close
        # Keep track of pending (not yet executed) orders
self.order = None
self.tree = self.params.tree
def notify_order(self, order):
if order.status in [order.Submitted, order.Accepted]:
return
self.order = None
return
def next(self):
        # If there is a pending trade, do not place another one
if self.order:
return
action = self.params.tree.evaluate(date=self.datas[0].datetime.date(0))
if action == 'Buy':
if self.position.size == 0:
self.order = self.buy(size = math.floor(self.broker.get_cash()/(self.datas[0].close*1.01)) )
                ## Since we use a 1% commission, shares are effectively 1% more expensive.
                ## The number of shares we can buy is the integer part of our cash
                ## divided by the share price plus its commission.
if action == 'Sell':
if self.position.size > 0:
self.order = self.sell(size=self.position.size)
class EndStats(bt.Analyzer):
    # Analyzer used to account for several
    # strategies from a single run (optstrategy)
def __init__(self):
self.start_val = self.strategy.broker.get_value()
self.end_val = None
self.sells=0
def stop(self):
self.end_val = self.strategy.broker.get_value()
self.sells = self.strategy.params.tree.sellcount
def get_analysis(self):
return {"start": self.start_val, "end": self.end_val,
"growth": self.end_val - self.start_val + 10*self.sells, "return": self.end_val / self.start_val}
class Simulate:
data = None
population = None
nextpopulation = None
numbertree = 60
numberiter = 200
start_date_train = "2009-03-20"
end_date_train = "2009-09-21"
start_date_test = "2009-09-22"
end_date_test = "2010-03-19"
symbol = "ENB.TO"
def __init__(self, numtree, numiter, symbol, start_train, end_train, start_test, end_test):
self.numbertree = numtree
self.numberiter = numiter
self.start_date_train = start_train
self.end_date_train = end_train
self.start_date_test = start_test
self.end_date_test = end_test
self.symbol = symbol
    # Given two trees, 'randomly' swaps two of their branches.
def Crossover(self, atree, btree):
aside, abranch = atree.selectRandomBranch()
bside, bbranch = btree.selectRandomBranch()
auxbranch = None
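        # Swap the selected subtrees: each one may hang from the 'left', 'right' or 'root'
        # slot of its parent, so all nine side combinations are handled explicitly below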
if aside == "left":
auxbranch = abranch.left
if bside == "left":
abranch.left = bbranch.left
bbranch.left = auxbranch
elif bside == "right":
abranch.left = bbranch.right
bbranch.right = auxbranch
else:
abranch.left = bbranch.root
bbranch.root = auxbranch
elif aside == "right":
auxbranch = abranch.right
if bside == "left":
abranch.right = bbranch.left
bbranch.left = auxbranch
elif bside == "right":
abranch.right = bbranch.right
bbranch.right = auxbranch
else:
abranch.right = bbranch.root
bbranch.root = auxbranch
else:
auxbranch = abranch.root
if bside == "left":
abranch.root = bbranch.left
bbranch.left = auxbranch
elif bside == "right":
abranch.root = bbranch.right
bbranch.right = auxbranch
else:
abranch.root = bbranch.root
bbranch.root = auxbranch
return atree, btree
    # Given a population and its scores, returns the algorithm's
    # population together with its reproductive probabilities.
def Reproductivity(self, score):
pop_score = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division
"""Functions to help detect face, landmarks, emotions, action units from images and videos"""
from collections import deque
from multiprocessing.pool import ThreadPool
import os
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw, ImageOps
import math
from scipy.spatial import ConvexHull
from skimage.morphology.convex_hull import grid_points_in_poly
from skimage.feature import hog
import cv2
import feat
from tqdm import tqdm
from feat.data import Fex
from feat.utils import (
get_resource_path,
face_rect_to_coords,
openface_2d_landmark_columns,
jaanet_AU_presence,
RF_AU_presence,
FEAT_EMOTION_MAPPER,
FEAT_EMOTION_COLUMNS,
FEAT_FACEBOX_COLUMNS,
FEAT_TIME_COLUMNS,
FACET_TIME_COLUMNS,
BBox,
convert68to49,
padding,
resize_with_padding,
align_face_68pts
)
from feat.au_detectors.JAANet.JAA_test import JAANet
from feat.au_detectors.DRML.DRML_test import DRMLNet
from feat.au_detectors.StatLearning.SL_test import RandomForestClassifier, SVMClassifier, LogisticClassifier
from feat.emo_detectors.ferNet.ferNet_test import ferNetModule
from feat.emo_detectors.ResMaskNet.resmasknet_test import ResMaskNet
from feat.emo_detectors.StatLearning.EmoSL_test import EmoRandomForestClassifier, EmoSVMClassifier
import torch
from feat.face_detectors.FaceBoxes.FaceBoxes_test import FaceBoxes
from feat.face_detectors.MTCNN.MTCNN_test import MTCNN
from feat.face_detectors.Retinaface import Retinaface_test
from feat.landmark_detectors.basenet_test import MobileNet_GDConv
from feat.landmark_detectors.pfld_compressed_test import PFLDInference
from feat.landmark_detectors.mobilefacenet_test import MobileFaceNet
import json
from torchvision.datasets.utils import download_url
import zipfile
class Detector(object):
def __init__(
self,
face_model="retinaface",
landmark_model="mobilenet",
au_model="rf",
emotion_model="resmasknet",
n_jobs=1,
):
"""Detector class to detect FEX from images or videos.
Detector is a class used to detect faces, facial landmarks, emotions, and action units from images and videos.
Args:
n_jobs (int, default=1): Number of processes to use for extraction.
Attributes:
info (dict):
n_jobs (int): Number of jobs to be used in parallel.
face_model (str, default=retinaface): Name of face detection model
            landmark_model (str, default=mobilenet): Name of landmark model
au_model (str, default=rf): Name of Action Unit detection model
emotion_model (str, default=resmasknet): Path to emotion detection model.
            face_detection_columns (list): Column names for face detection output (x, y, w, h)
face_landmark_columns (list): Column names for face landmark output (x0, y0, x1, y1, ...)
emotion_model_columns (list): Column names for emotion model output
mapper (dict): Class names for emotion model output by index.
input_shape (dict)
face_detector: face detector object
face_landmark: face_landmark object
emotion_model: emotion_model object
Examples:
>> detector = Detector(n_jobs=1)
>> detector.detect_image("input.jpg")
>> detector.detect_video("input.mp4")
"""
self.info = {}
self.info["n_jobs"] = n_jobs
if torch.cuda.is_available():
self.map_location = lambda storage, loc: storage.cuda()
else:
self.map_location = "cpu"
""" LOAD UP THE MODELS """
print("Loading Face Detection model: ", face_model)
# Check if model files have been downloaded. Otherwise download model.
# get model url.
with open(os.path.join(get_resource_path(), "model_list.json"), "r") as f:
model_urls = json.load(f)
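        # model_list.json maps each model name to the URLs of its pretrained weight files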
if face_model:
for url in model_urls["face_detectors"][face_model.lower()]["urls"]:
download_url(url, get_resource_path())
if landmark_model:
for url in model_urls["landmark_detectors"][landmark_model.lower()]["urls"]:
download_url(url, get_resource_path())
if au_model:
for url in model_urls["au_detectors"][au_model.lower()]["urls"]:
download_url(url, get_resource_path())
if ".zip" in url:
import zipfile
with zipfile.ZipFile(os.path.join(get_resource_path(), "JAANetparams.zip"), 'r') as zip_ref:
zip_ref.extractall(os.path.join(get_resource_path()))
if au_model.lower() in ['logistic', 'svm', 'rf']:
download_url(
model_urls["au_detectors"]['hog-pca']['urls'][0], get_resource_path())
download_url(
model_urls["au_detectors"]['au_scalar']['urls'][0], get_resource_path())
if emotion_model:
for url in model_urls["emotion_detectors"][emotion_model.lower()]["urls"]:
download_url(url, get_resource_path())
if emotion_model.lower() in ['svm', 'rf']:
download_url(
model_urls["emotion_detectors"]['emo_pca']['urls'][0], get_resource_path())
download_url(
model_urls["emotion_detectors"]['emo_scalar']['urls'][0], get_resource_path())
if face_model:
if face_model.lower() == "faceboxes":
self.face_detector = FaceBoxes()
elif face_model.lower() == "retinaface":
self.face_detector = Retinaface_test.Retinaface()
elif face_model.lower() == "mtcnn":
self.face_detector = MTCNN()
self.info["face_model"] = face_model
facebox_columns = FEAT_FACEBOX_COLUMNS
self.info["face_detection_columns"] = facebox_columns
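            # Pre-build an all-NaN placeholder row, returned when no face is detected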
predictions = np.empty((1, len(facebox_columns)))
predictions[:] = np.nan
empty_facebox = pd.DataFrame(predictions, columns=facebox_columns)
self._empty_facebox = empty_facebox
print("Loading Face Landmark model: ", landmark_model)
if landmark_model:
if landmark_model.lower() == "mobilenet":
self.landmark_detector = MobileNet_GDConv(136)
self.landmark_detector = torch.nn.DataParallel(
self.landmark_detector)
checkpoint = torch.load(
os.path.join(
get_resource_path(),
"mobilenet_224_model_best_gdconv_external.pth.tar",
),
map_location=self.map_location,
)
self.landmark_detector.load_state_dict(
checkpoint["state_dict"])
elif landmark_model.lower() == "pfld":
self.landmark_detector = PFLDInference()
checkpoint = torch.load(
os.path.join(get_resource_path(),
"pfld_model_best.pth.tar"),
map_location=self.map_location,
)
self.landmark_detector.load_state_dict(
checkpoint["state_dict"])
elif landmark_model.lower() == "mobilefacenet":
self.landmark_detector = MobileFaceNet([112, 112], 136)
checkpoint = torch.load(
os.path.join(
get_resource_path(), "mobilefacenet_model_best.pth.tar"
),
map_location=self.map_location,
)
self.landmark_detector.load_state_dict(
checkpoint["state_dict"])
self.info["landmark_model"] = landmark_model
self.info["mapper"] = openface_2d_landmark_columns
landmark_columns = openface_2d_landmark_columns
self.info["face_landmark_columns"] = landmark_columns
predictions = np.empty((1, len(openface_2d_landmark_columns)))
predictions[:] = np.nan
empty_landmarks = pd.DataFrame(predictions, columns=landmark_columns)
self._empty_landmark = empty_landmarks
print("Loading au model: ", au_model)
self.info["au_model"] = au_model
if au_model:
if au_model.lower() == "jaanet":
self.au_model = JAANet()
elif au_model.lower() == "drml":
self.au_model = DRMLNet()
elif au_model.lower() == "logistic":
self.au_model = LogisticClassifier()
elif au_model.lower() == "svm":
self.au_model = SVMClassifier()
elif au_model.lower() == 'rf':
self.au_model = RandomForestClassifier()
if (au_model is None) or (au_model.lower() in ['jaanet', 'drml']):
auoccur_columns = jaanet_AU_presence
else:
auoccur_columns = RF_AU_presence
self.info["au_presence_columns"] = auoccur_columns
predictions = np.empty((1, len(auoccur_columns)))
predictions[:] = np.nan
empty_au_occurs = | pd.DataFrame(predictions, columns=auoccur_columns) | pandas.DataFrame |
import unittest
import numpy as np
import pandas as pd
from tabular_dataset import TabularDataset
def get_test_df():
return pd.DataFrame({
'A': [1, 2, 3, np.nan],
'B': [0, 1, 0, np.nan],
'C': list('abba'),
'target': list('xyzx')
})
def test_column_names_are_correctly_set():
df = get_test_df()
tds = TabularDataset(df, numerical_columns=['A'])
assert tds.numerical.column_names == ['A']
def test_scale():
df = get_test_df()
tds = TabularDataset(df, numerical_columns=['A'])
tds.numerical.scale()
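    # Min-max scaling maps the observed range [1, 3] onto [0, 1]; NaN values are preserved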
assert repr(tds.x) == repr(np.array([[0.], [0.5], [1.], [np.nan]]))
def test_scale_no_fit():
df = get_test_df()
test_data = df.iloc[-2:]
tds = TabularDataset(df, test_data=test_data, numerical_columns=['A'])
tds.numerical.scale()
_ = tds.x_train
assert repr(tds.x_test) == repr(np.array([[1.], [np.nan]]))
def test_normalize():
df = get_test_df()
tds = TabularDataset(df.dropna(), numerical_columns=['A'])
expected_result = np.array([-1., 0., 1.])
tds.numerical.normalize()
actual_result = tds.x[:, 0]
assert np.allclose(actual_result, expected_result)
def test_normalize_no_fit():
df = get_test_df()
tds = TabularDataset(df.iloc[[0, 1]], test_data=df.iloc[[2]],
numerical_columns=['A'])
expected_result = np.array([2.12132034])
tds.numerical.normalize()
_ = tds.x_train
actual_result = tds.x_test[:, 0]
assert np.allclose(actual_result, expected_result)
def test_log():
df = pd.DataFrame({'A': [-2, -1, 0, 1, 2, np.nan]})
tds = TabularDataset(df, numerical_columns=['A'])
expected_result = np.array([np.nan, -float('inf'), 0.000000, 0.693147,
1.098612, np.nan])
# Ignore "divide by zero" warning for testing.
with np.testing.suppress_warnings() as sup:
sup.filter(RuntimeWarning)
tds.numerical.log()
actual_result = tds.x[:, 0]
assert np.allclose(actual_result, expected_result, equal_nan=True)
def test_power():
df = pd.DataFrame({'A': [-2, -1, 0, 1, 2, np.nan]})
tds = TabularDataset(df, numerical_columns=['A'])
expected_result = np.array([4., 1., 0., 1., 4., np.nan])
tds.numerical.power(exponent=2)
actual_result = tds.x[:, 0]
assert np.allclose(actual_result, expected_result, equal_nan=True)
def test_ranks_with_default_method():
df = | pd.DataFrame({'A': [0, 2, 3, 2]}) | pandas.DataFrame |
# Standard libraries
from pathlib import Path
from datetime import datetime
from collections import Counter
from typing import List, Dict, Any, Tuple
# Third-party libraries
import requests
import pandas as pd
import plotly.graph_objects as go
from tqdm import tqdm
from plotly import subplots, offline
# Local Libraries
# TODOS
# - Unit tests
# - Setup pipeline on GitHub
def _setup_data_dir() -> None:
"""
Helper function to create necessary data directory and files.
"""
data_dir = Path("data/")
if not data_dir.exists():
print(f"\nCreating data directory: {data_dir}")
data_dir.mkdir()
empty_df = pd.DataFrame(
columns=[
"issues_open",
"issues_closed",
"issues_total",
"prs_open",
"prs_closed",
"prs_total",
"hacktoberfest_issues_open",
"hacktoberfest_issues_closed",
"hacktoberfest_issues_total",
]
)
empty_df.to_csv(data_dir.joinpath("pyjanitor_hacktoberfest_2020.csv"))
def _get_total_page_count(url: str) -> int:
"""
    Helper function to get total number of pages for scraping GitHub.
Notes:
        - This page count is equivalent to the sum of total Issues
          and Pull Requests (regardless of open or closed status).
- This should be the same number as navigating to the Issues
page of a repository and deleting the ``is:issue is:open``
filter from the search bar and pressing Enter. The sum of
``Open`` and ``Closed`` should be the same as page count
returned here.
"""
    # Order matters here for last_url: ensure ``page`` follows ``per_page``
params = {"state": "all", "per_page": 1, "page": 0}
response = requests.get(url=url, params=params) # type: ignore[arg-type]
if not response.ok:
raise requests.exceptions.ConnectionError(f"Failed to connect to {url}")
last_url_used = response.links["last"]["url"]
start_index = last_url_used.find("&page=")
page_count = int(last_url_used[(start_index + len("&page=")) :])
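    # e.g. a 'last' link ending in '...&per_page=1&page=1234' yields page_count == 1234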
return page_count
def _calculate_needed_batches(page_count: int) -> int:
"""
    Helper function to determine the number of batches for GitHub scraping.
    This accounts for the GitHub API's use of pagination.
    Each batch is one request retrieving up to 100 items.
"""
n_batches = page_count // 100 + 1
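    # e.g. 1234 total items -> 1234 // 100 + 1 == 13 requests of up to 100 items each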
return n_batches
def scrape_github(url: str, needed_batches: int) -> List[Dict[str, Any]]:
"""
    Scrape GitHub repo and collect all Issues and Pull Requests.
Notes:
- GitHub treats Pull Requests as Issues.
- ``state: 'all'`` is used to collect both Open and Closed
Issues/PRs.
- GitHub API uses pagination, which means it returns only a
chunk of information in a single request. Thus, need to
set how many records will be included per page and the
page number. Here, we collect 100 items per request.
"""
scraped_data = []
# Pagination needs to start at 1 since page 0 and page 1 are duplicates
# Account for this by using range from [1,batches+1] instead of [0,batches]
for i in tqdm(range(1, needed_batches + 1), desc="\tGitHub Scrape", ncols=75):
params = {"state": "all", "per_page": 100, "page": i}
response = requests.get(url=url, params=params)
scraped_data.extend(response.json())
return scraped_data
def create_metadata_df(scraped_data: List[Dict[str, Any]]) -> pd.DataFrame:
"""
Create a summary dataframe of all info scraped from GitHub.
"""
df = pd.DataFrame(scraped_data)
keep_cols = [
"number",
"title",
"state",
"labels",
"created_at",
"updated_at",
"closed_at",
"pull_request",
]
df = df[keep_cols]
df = df.assign(labels=df["labels"].apply(lambda x: [d.get("name") for d in x]))
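    # GitHub returns each label as a dict; keep only its name, e.g. [{'name': 'hacktoberfest'}] -> ['hacktoberfest']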
df = df.assign(
hacktober=df["labels"].apply(lambda x: "hacktoberfest" in x).astype(int)
)
return df
def create_topic_dfs(
metadata_df: pd.DataFrame,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
"""
Helper function to split Issues, PRs, and Hacktoberfest Issues
into separate, individual dataframes.
"""
issues_df = metadata_df[metadata_df["pull_request"].isnull()]
prs_df = metadata_df[~metadata_df["pull_request"].isnull()]
hack_issues_df = issues_df[issues_df["hacktober"] == 1]
return issues_df, prs_df, hack_issues_df
def collect_topic_counts(topic_df: pd.DataFrame, topic_str: str) -> Dict[str, int]:
"""
Counts Open, Closed, and Total items for a provided topic.
Topics are Issues, PRs, or Hacktoberfest Issues.
"""
cnts = Counter(topic_df["state"]) # type: ignore[var-annotated]
# If there is nothing closed, add it as a 0 (hacktoberfest)
cnts["closed"] = cnts.get("closed", 0)
cnts["total"] = len(topic_df)
# Add prefix to counts for easy dataframe creation downstream
cnts = {f"{topic_str}_{k}": v for k, v in cnts.items()} # type: ignore[assignment]
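    # e.g. topic_str='issues' turns {'open': 3, 'closed': 5, 'total': 8} into {'issues_open': 3, ...}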
return cnts
def _get_todays_data() -> str:
""" Helper function to get today's date in YYYY-MM-DD format."""
return datetime.today().strftime("%Y-%m-%d")
def create_current_cnt_df(
issue_cnts: Dict[str, int],
pr_cnts: Dict[str, int],
hack_issue_cnts: Dict[str, int],
previous_data_path: Path,
) -> pd.DataFrame:
"""
Create a dataframe that stores the count data as of ``today``.
This prepares data for plotting and saving.
"""
today = _get_todays_data()
today_cnt_df = pd.DataFrame(
{**issue_cnts, **pr_cnts, **hack_issue_cnts}, index=[today]
)
previous_cnt_df = pd.read_csv(previous_data_path, index_col=0)
current_cnt_df = pd.concat([previous_cnt_df, today_cnt_df])
print(f"\tUpdating count data by appending counts from {today}")
print(f"\tSaving updates to {previous_data_path}")
current_cnt_df.to_csv(previous_data_path)
return current_cnt_df
def _make_scatter_trace(
current_cnt_df: pd.DataFrame, plot_col: str, name: str
) -> go.Scatter:
"""
Helper function to create the scatter traces for Issues and PRs.
"""
ORANGE = "#ff580a"
BLACK = "#080808"
line_color = ORANGE if "issues" in plot_col else BLACK
circle_color = BLACK if "issues" in plot_col else ORANGE
trace = go.Scatter(
x=pd.to_datetime(current_cnt_df.index, format="%Y-%m-%d"),
y=current_cnt_df[plot_col],
mode="lines+markers",
name=name,
marker=dict(color=line_color, line=dict(width=2, color=circle_color)),
showlegend=False,
)
return trace
def _make_bar_trace(current_cnt_df: pd.DataFrame, name: str) -> go.Bar:
"""
Helper function to create the stacked bar traces for Issues and PRs.
"""
ORANGE = "#ff580a"
BLACK = "#080808"
color = ORANGE if name == "Open" else BLACK
today = _get_todays_data()
trace = go.Bar(
name=name,
x=["Hacktoberfest Issues", "Issues", "Pull Requests"],
y=current_cnt_df.loc[today][
[
f"hacktoberfest_issues_{name.lower()}",
f"issues_{name.lower()}",
f"prs_{name.lower()}",
]
].values,
marker_color=color,
width=0.5,
)
return trace
def _annotate_scatter(fig: go.Figure, current_cnt_df: pd.DataFrame) -> None:
"""
Helper function to annotate Issues and PRs on scatter plot.
Legend is not shown so need annotations to see which line
pertains to Issues and which line pertains to PRs.
"""
x_loc = | pd.to_datetime(current_cnt_df.iloc[0].name, format="%Y-%m-%d") | pandas.to_datetime |
import pandas as pd
import utils_eia923 as utils_eia923
import os, sys
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
sys.path.append(os.environ.get("PROJECT_ROOT"))
cols = ['Operator Id',
'Plant Id',
'Census Region',
'Reported\nFuel Type Code',
'Elec_MMBtu\nJanuary',
'Elec_MMBtu\nFebruary',
'Elec_MMBtu\nMarch',
'Elec_MMBtu\nApril',
'Elec_MMBtu\nMay',
'Elec_MMBtu\nJune',
'Elec_MMBtu\nJuly',
'Elec_MMBtu\nAugust',
'Elec_MMBtu\nSeptember',
'Elec_MMBtu\nOctober',
'Elec_MMBtu\nNovember',
'Elec_MMBtu\nDecember',
'Netgen\nJanuary',
'Netgen\nFebruary',
'Netgen\nMarch',
'Netgen\nApril',
'Netgen\nMay',
'Netgen\nJune',
'Netgen\nJuly',
'Netgen\nAugust',
'Netgen\nSeptember',
'Netgen\nOctober',
'Netgen\nNovember',
'Netgen\nDecember'
]
# load all raw excel files
eia923_2015_path = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'raw',
'EIA923_Schedules_2_3_4_5_M_12_2015_Final_Revision.xlsx')
df_2015 = pd.read_excel(eia923_2015_path, skiprows=5, usecols=cols)
df_2015 = df_2015[df_2015['Census Region'] != 'PACN']
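# Exclude the PACN census region (presumably Pacific Noncontiguous, i.e. AK/HI); the same filter is applied for each year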
print('Read 2015')
eia923_2016_path = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'raw',
'EIA923_Schedules_2_3_4_5_M_12_2016_Final_Revision.xlsx')
df_2016 = pd.read_excel(eia923_2016_path, skiprows=5, usecols=cols)
df_2016 = df_2016[df_2016['Census Region'] != 'PACN']
print('Read 2016')
eia923_2017_path = os.path.join(os.environ.get("PROJECT_ROOT"), 'data', 'raw',
'EIA923_Schedules_2_3_4_5_M_12_2017_Final_Revision.xlsx')
df_2017 = | pd.read_excel(eia923_2017_path, skiprows=5, usecols=cols) | pandas.read_excel |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
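        # e.g. timedelta64(1, 2, 0, 0, 0) == np.timedelta64(1, 'D') + np.timedelta64(2, 'h')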
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
                raise AssertionError(
                    "invalid comparison [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timetimedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timetimedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with | tm.assertRaises(TypeError) | pandas.util.testing.assertRaises |
import json
import mmap
import os
import random
import re
from collections import Counter
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from tqdm import tqdm
class SUBEVENTKG_Processor(object):
"""
    Extract a sub-dataset of EventKG suitable for knowledge representation tasks.
    The original dataset is available at https://eventkg.l3s.uni-hannover.de/data.html; the version 3.0 data package is used.
"""
def __init__(self,
events_nq_path,
entities_nq_path,
relations_base_nq_path,
relations_events_other_nq_path,
relations_entities_temporal_nq_path,
processed_entities_path,
processed_events_path,
processed_event_event_path,
processed_event_entity_path,
processed_entity_entity_path,
filt_event_event_path,
filt_event_entity_path,
filt_entity_entity_path,
event_node_count_path,
entity_node_count_path,
event_rdf2name_path,
entity_rdf2name_path,
relation_rdf2name_path,
event_lut_path,
entity_lut_path,
relation_lut_path,
event_degree_list,
entity_degree_list
):
"""
Args:
            events_nq_path: raw dataset of event nodes
            entities_nq_path: raw dataset of entity nodes
            relations_base_nq_path: raw event-event dataset (no temporal information)
            relations_events_other_nq_path: raw event-entity / entity-event dataset
            relations_entities_temporal_nq_path: raw entity-entity dataset (with temporal information)
            processed_entities_path: entity dictionary, rdf -> count(0)
            processed_events_path: event dictionary, rdf -> count(0)
            processed_event_event_path: event-event triples converted to quintuple format
            processed_event_entity_path: event-entity triples converted to quintuple format
            processed_entity_entity_path: entity-entity triples converted to quintuple format
            filt_event_event_path: filtered event-event quintuples
            filt_event_entity_path: filtered event-entity quintuples
            filt_entity_entity_path: filtered entity-entity quintuples
            event_node_count_path: number of event nodes counted
            entity_node_count_path: number of entity nodes counted
            event_rdf2name_path: event rdf-to-name mapping
            entity_rdf2name_path: entity rdf-to-name mapping
            relation_rdf2name_path: relation rdf-to-name mapping
            event_lut_path: path of the event look-up table
            entity_lut_path: path of the entity look-up table
            relation_lut_path: path of the relation look-up table
            event_degree_list: list of event node degrees
            entity_degree_list: list of entity node degrees
"""
self.raw_events_path = events_nq_path
self.raw_entities_path = entities_nq_path
self.raw_event_event_path = relations_base_nq_path
self.raw_event_entity_path = relations_events_other_nq_path
self.raw_entity_entity_path = relations_entities_temporal_nq_path
self.processed_entities_path = processed_entities_path
self.processed_events_path = processed_events_path
self.processed_event_event_path = processed_event_event_path
self.processed_event_entity_path = processed_event_entity_path
self.processed_entity_entity_path = processed_entity_entity_path
self.filt_event_event_path = filt_event_event_path
self.filt_event_entity_path = filt_event_entity_path
self.filt_entity_entity_path = filt_entity_entity_path
self.event_node_count_path = event_node_count_path
self.entity_node_count_path = entity_node_count_path
self.event_rdf2name_path = event_rdf2name_path
self.entity_rdf2name_path = entity_rdf2name_path
self.relation_rdf2name_path = relation_rdf2name_path
self.event_lut_path = event_lut_path
self.entity_lut_path = entity_lut_path
self.relation_lut_path = relation_lut_path
self.event_degree_list = event_degree_list
self.entity_degree_list = entity_degree_list
self.entity_dict = None
self.event_dict = None
self.rdf_triplets_event_event = None
self.rdf_triplets_event_entity = None
self.rdf_triplets_entity_entity = None
self.filt_triplets_event_event = None
self.filt_triplets_event_entity = None
self.filt_triplets_entity_entity = None
self.event_rdf2name_dict = dict()
self.entity_rdf2name_dict = dict()
self.relation_rdf2name_dict = dict()
self.filt_event_set = set()
self.filt_entity_set = set()
self.filt_relation_set = set()
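        # Fix the random seeds so that the extracted sub-dataset is reproducible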
seed = 1
random.seed(seed)
np.random.seed(seed)
def _get_num_lines(self, file_path):
"""
        Count the number of lines in a txt file
        :param file_path: path of the txt file whose lines are to be counted
"""
fp = open(file_path, "r+")
buf = mmap.mmap(fp.fileno(), 0)
lines = 0
while buf.readline():
lines += 1
return lines
def create_entities_index(self, reprocess=True, describe=True):
"""
        Build the {entity rdf: count} dictionary over the full set of entities
        Args:
            reprocess: True to reprocess from the raw data
            describe: True to print dataset statistics
"""
if reprocess:
self.entity_dict = dict()
print("processing entities index...")
with open(self.raw_entities_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(self.raw_entities_path)):
line = line.strip().split(" ")
entity = line[0]
if entity not in self.entity_dict.keys():
self.entity_dict[entity] = 0
json.dump(self.entity_dict, open(self.processed_entities_path, "w"), indent=4, sort_keys=True)
print("processed_entities_dict has been saved in {}".format(self.processed_entities_path))
else:
if os.path.exists(self.processed_entities_path):
print("loading entities index...")
with open(self.processed_entities_path) as file:
self.entity_dict = json.load(file)
print("loading entities index succeed!")
else:
raise FileNotFoundError("processed_entities_path does not exists!")
if describe:
print("entities_dict_len", len(self.entity_dict))
def create_events_index(self, reprocess=True, describe=True):
"""
        Build the {event rdf: count} dictionary over the full set of events
"""
if reprocess:
self.event_dict = dict()
print("processing events index...")
with open(self.raw_events_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(self.raw_events_path)):
line = line.strip().split(" ")
event = line[0]
if event not in self.event_dict.keys():
self.event_dict[event] = 0
json.dump(self.event_dict, open(self.processed_events_path, "w"), indent=4, sort_keys=True)
print("processed_events_dict has been saved in {}".format(self.processed_events_path))
else:
if os.path.exists(self.processed_events_path):
print("loading events index...")
with open(self.processed_events_path) as file:
self.event_dict = json.load(file)
print("loading events index succeed!")
else:
raise FileNotFoundError("processed_entities_path does not exists!")
if describe:
print("events_dict_len", len(self.event_dict))
def event_event_raw2df(self, reprocess=True, describe=True):
"""
        Extract the hasSubEvent, nextEvent and previousEvent relations between events and save them in dataframe format
        Raw format:
        event relation event
        Stored format:
        event relation event start_time end_time (event-event triples carry no temporal information, represented as -1)
"""
if reprocess:
df_lines = []
with open(self.raw_event_event_path, "r", encoding="utf-8") as file:
print("processing event_event_raw2df...")
for line in tqdm(file, total=self._get_num_lines(self.raw_event_event_path)):
line = line.strip().split(" ")
if line[1] == "<http://dbpedia.org/ontology/nextEvent>" or \
line[1] == "<http://dbpedia.org/ontology/previousEvent>" or \
line[1] == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasSubEvent>":
head = line[0]
relation = line[1]
tail = line[2]
df_lines.append([head, relation, tail, -1, -1])
self.rdf_triplets_event_event = pd.DataFrame(df_lines)
self.rdf_triplets_event_event.columns = ["head", "relation", "tail", "start_time", "end_time"]
self.rdf_triplets_event_event.to_csv(self.processed_event_event_path)
print("rdf_triplets_event_event has been saved in {}".format(self.processed_event_event_path))
else:
if os.path.exists(self.processed_event_event_path):
print("loading event_event_raw2df...")
self.rdf_triplets_event_event = pd.read_csv(self.processed_event_event_path)
print("loading event_event_raw2df succeed!")
else:
raise FileNotFoundError("processed_event_event_path does not exists!")
if describe:
print("rdf_triplets_event_event_len", len(self.rdf_triplets_event_event))
def _node_relation_datatype_raw2df(self,
reprocess=True,
describe=True,
datatype=None,
raw_data_path=None,
saved_path=None):
        def init_relation_node_dict(relation_node_dict, raw_data_path):
            """Initialize the nested dictionary"""
with open(raw_data_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(raw_data_path)):
line = line.strip().split(" ")
relation_node = line[0]
if relation_node not in relation_node_dict.keys():
relation_node_dict[relation_node]["head"] = -1
relation_node_dict[relation_node]["relation"] = -1
relation_node_dict[relation_node]["tail"] = -1
relation_node_dict[relation_node]["start_time"] = -1
relation_node_dict[relation_node]["end_time"] = -1
return relation_node_dict
        def add_value_relation_node_dict(relation_node_dict, raw_data_path):
            """Fill values into the nested dictionary"""
with open(raw_data_path, "r", encoding="utf-8") as file:
for line in tqdm(file, total=self._get_num_lines(raw_data_path)):
line = line.strip().split(" ")
relation_node = line[0]
arrow = line[1]
value = line[2]
if arrow == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#subject>":
relation_node_dict[relation_node]["head"] = value
if arrow == "<http://semanticweb.cs.vu.nl/2009/11/sem/roleType>":
relation_node_dict[relation_node]["relation"] = value
if arrow == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#object>":
relation_node_dict[relation_node]["tail"] = value
if arrow == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasBeginTimeStamp>":
relation_node_dict[relation_node]["start_time"] = value
if arrow == "<http://semanticweb.cs.vu.nl/2009/11/sem/hasEndTimeStamp>":
relation_node_dict[relation_node]["end_time"] = value
return relation_node_dict
if reprocess:
relation_node_dict = defaultdict(dict)
print("processing {} _raw2df...".format(datatype))
relation_node_dict = init_relation_node_dict(relation_node_dict, raw_data_path)
relation_node_dict = add_value_relation_node_dict(relation_node_dict, raw_data_path)
            # convert the nested dictionary to a DataFrame
df_lines = []
for key in tqdm(relation_node_dict.keys()):
df_lines.append([relation_node_dict[key]["head"],
relation_node_dict[key]["relation"],
relation_node_dict[key]["tail"],
relation_node_dict[key]["start_time"],
relation_node_dict[key]["end_time"]])
df = pd.DataFrame(df_lines)
df.columns = ["head", "relation", "tail", "start_time", "end_time"]
if datatype == "event_entity":
self.rdf_triplets_event_entity = df
if datatype == "entity_entity":
self.rdf_triplets_entity_entity = df
df.to_csv(saved_path)
print("rdf_triplets_{} has been saved in {}".format(datatype, saved_path))
else:
if os.path.exists(saved_path):
print("loading {}_raw2df...".format(datatype))
df = | pd.read_csv(saved_path) | pandas.read_csv |
# Importing Libraries
# libraries for webapp
import streamlit as st
#libraries for EDA
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import mplcyberpunk
import plotly.figure_factory as ff
import plotly.io as pio
import datetime
# Machine Learning and Deep Learning Libraries
from tensorflow import keras
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, LeakyReLU
from keras.layers import LSTM ,Bidirectional
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import TimeSeriesSplit
from tensorflow.keras import backend as K
from sklearn.metrics import r2_score
from keras.regularizers import L1L2
from tensorflow.keras.models import load_model
from numpy.random import seed
## Data exploration
#st.set_page_config(layout="wide")
# creates a sidebar on the webapp with multiple subpage options
option = st.sidebar.selectbox("Choose the following options",("Data Analysis", "Sales Forecasting","Custom Forecasting"))
# condition to enter the first page
if option == "Data Analysis":
# title of the first page
st.title('Customer Retail Sales Forecasting')
# created a space between the main title and the rest of the page
st.write("---")
"""
### Data Source:
"""
st.markdown("""[TravelSleek:]('https://travelsleek.in/') It's a family-owned E-commerce business that specializes
in personalized travel products which are manufactured from Faux Leather.""")
# creates a dropdown box
with st.expander("Products"):
st.write("""
Customized Products.
""")
images = ['../plots/item1.jpg','../plots/item2.jpg', '../plots/item3.jpg']
st.image(images, width = 200, use_column_width=False) #caption=["some generic text"] * len(images)
st.markdown("""[ImageSource:](https://travelsleek.in/)""")
st.write('_____________________________')
"""
### Tech Stack
"""
# creating columns in the webapp so that the text or images can be arranged in the column format
col1, col2, col3 = st.columns(3)
with col1:
"""




"""
with col2:
"""




"""
with col3:
"""




"""
##########################################################################################
################### From plotly express ###################################################
st.write('_____________________________')
st.header(option)
"""
### Info:
    - Data was exported from the e-commerce platform to a .csv file.
    - The data covers October 2019 to January 2022.
    - October 2019 to October 2020: sales on the Instagram marketplace.
    - November 2020 to January 2022: sales on the e-commerce platform.
    - The dataset contains 8,000 sales records with 77 associated features.
    - Nearly 36% of the values were null.
    - Total SKUs in the inventory: 35.
"""
st.write('_____________________________')
st.subheader('Top5 selling products and their Revenue Generation')
# calling the .csv file
Top5 = | pd.read_csv('../forecast_data/Top5.csv') | pandas.read_csv |
import numpy as np
import pandas as pd
import string
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer # get from VP later
from nltk.tokenize import word_tokenize
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer
def create_user_feature():
'''
Return a user_feature matrix
Takes in the transaction list from the Movielens 100k dataset
and replaces the userId with a feature vector representing
the number of movies seen by the user per genre
possible genres include the following:
'IMAX', 'Adventure', 'Mystery', 'Animation', 'Documentary', 'Comedy',
'Western', 'War', 'Film-Noir', 'Crime', 'Drama', 'Thriller', 'Fantasy',
'Action', 'Sci-Fi', 'Children', 'Romance', 'Horror', 'Musical',
'(no genres listed)'
Input
---------
none
Output
---------
user_feature (pd.DataFrame): feature_vector containing number of count of
genres seen based on ratings given by a user
- each movie can have several genres
- each row correspond to a transaction (user rating)
'''
raw_transaction_list = pd.read_csv('sample_data/ratings.csv')
transaction_list = raw_transaction_list[['userId','movieId', 'rating']].copy()
# reduce size of DataFrame for transaction_list by downcasting
for col in transaction_list:
if transaction_list[col].dtype == 'int64':
transaction_list[col] = pd.to_numeric(transaction_list[col], downcast='integer')
if transaction_list[col].dtype == 'float64':
transaction_list[col] = pd.to_numeric(transaction_list[col], downcast='float')
# preprocess movie list and genres
movie_description = pd.read_csv('sample_data/movies.csv')
movie_description = movie_description.set_index('movieId')
movie_description['genre'] = movie_description['genres'].str.split('|')
# extract the genres for the movie in each transaction/rating
movie_IDs_list = transaction_list['movieId']
transaction_list['genre'] = list(movie_description.loc[movie_IDs_list[:len(movie_IDs_list)]]['genre'])
# count the number of genres seen by each userId
genre_count = (transaction_list.groupby('userId')['genre']
.apply(list)
.apply(lambda x: [item for sublist in x for item in sublist])
.apply(Counter))
# remove genre column in transaction list (just to conserve memspace)
del transaction_list['genre']
# create user_feature with count of genres per user
user_feature = pd.DataFrame(list(genre_count)).fillna(0)
for col in user_feature:
user_feature[col] = pd.to_numeric(user_feature[col], downcast='integer')
user_feature['userId'] = genre_count.index
# re-arrange columns
cols = user_feature.columns.tolist()
cols = cols[-1:] + cols[:-1]
user_feature = user_feature[cols]
# rename cols
old_cols = user_feature.columns[1:]
new_cols = []
for idx, col in enumerate(cols[1:], 1):
new_cols.append(f'u_{idx}')
user_feature.rename(columns=dict(zip(old_cols, new_cols)), inplace=True)
return raw_transaction_list, user_feature
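# Hypothetical usage sketch (relies on the sample_data CSV paths hard-coded above):
#   ratings, user_features = create_user_feature()
#   user_features.head()   # columns: userId, u_1 ... u_N -- one genre-count column per genre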
def preprocess_string(text):
''' Preprocess text for tf-idf
Transforms the text into lowercase and removes symbols
and punctuations
Removes stopwords using NLTK library
Lemmatizes words using SnowballStemmer (NLTK Library)
Input
--------
text (string) : string from the Movielens synopsis dataset
Output
--------
new_text (string) : preprocessed text for further tf-idf processing
'''
stop_words = stopwords.words('english')
stemmer = SnowballStemmer(language='english')
text = text.lower()
text = ''.join([char for char in text if char not in string.punctuation])
new_text = ""
words = word_tokenize(text)
for word in words:
if word not in stop_words and len(word) > 2:
new_text = new_text + " " + stemmer.stem(word)
return new_text
# +
def create_item_feature(num_features = 300):
'''
Return item_feature matrix based on TF-IDF of Movie Synopsis
Takes in the list of movies that has been rated in the MovieLens 100k
dataset and fetches the respective synopsis for TF-IDF computation
Input
---------
num_features : number of features to be used for the TF-IDF extraction
: default value 300 (~sqrt[100k rows])
Output
---------
item_feature (pd.DataFrame): feature_vector from TF-IDF extracted
from movie synopses the TheMovieDB dataset
'''
transaction_list = pd.read_csv('sample_data/ratings.csv', usecols=['movieId'])
# filter the unique movie IDs
seen_movies = pd.DataFrame(transaction_list['movieId'].unique(), columns={'movieId'})
# the synopsis is based on the "The Movie DB" Id system
# links.csv has a mapping between MovieLens ID and The MovieDB Id
movie_id_links = pd.read_csv('sample_data/links.csv', usecols =['movieId','tmdbId'])
movie_id_links = movie_id_links.dropna()
movie_id_links.head()
# get mapping between MovieLens IDs and TMDB IDs
seen_movies = seen_movies.merge(movie_id_links, on='movieId', how='inner')
# Read MetaData CSV file with movie plots/synopsis
metadata = pd.read_csv('sample_data/movies_metadata.csv', usecols=['id','overview'])
metadata = metadata.rename(columns={'id':'tmdbId'})
# drop movies with invalid tmbdId (e.g., date string instead of integer)
ids1 = pd.to_numeric(metadata['tmdbId'], errors='coerce').isna()
metadata = metadata.drop(metadata[ids1].index)
# drop movies with NaN synopsis
metadata = metadata.dropna()
metadata['tmdbId'] = metadata['tmdbId'].astype(float)
metadata = metadata.drop_duplicates(subset=['tmdbId'])
# get only synopsis for movies in the transaction list
synopsis_set = seen_movies.merge(metadata, on='tmdbId', how='inner')
# preprocess synopsis strings
synopsis_set['overview'] = synopsis_set['overview'].apply(preprocess_string)
# TF-IDF processing
    tfidfvectorizer = TfidfVectorizer(analyzer='word', token_pattern=r'[a-z]+\w*', stop_words='english', max_features=num_features)
tfidf_vector = tfidfvectorizer.fit_transform(synopsis_set['overview'])
tfidf_df = pd.DataFrame(tfidf_vector.toarray(), index=synopsis_set['movieId'], columns=tfidfvectorizer.get_feature_names_out())
# normalization per column (word)
tfidf_df = tfidf_df.apply(lambda x: (x - x.min())/(x.max() - x.min()))
tfidf_df = tfidf_df.reset_index()
# rename cols
old_cols = tfidf_df.columns
new_cols = []
new_cols.append(old_cols[0])
for idx, col in enumerate(old_cols[1:], 1):
new_cols.append(f'i_{idx}')
tfidf_df.rename(columns=dict(zip(old_cols, new_cols)), inplace=True)
return tfidf_df
# -
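# Hypothetical usage sketch (assumes the sample_data CSVs referenced above are available):
#   item_features = create_item_feature(num_features=300)
#   item_features.head()   # columns: movieId, i_1 ... i_300 -- min-max normalized TF-IDF weights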
def load_data(aug_tt, item_tt,user_tt):
"""
Load the data from the transaction tables
Paramters
---------
aug_tt : str
File name of the parquet file with each row corresponding
to a user's features, an item's features, and the user's
rating for that item
item_tt : str
File name of the parquet file with each row corresponding
to an item's features
user_tt : str
File name of the parquet file with each row corresponding
to a user's features
Returns
-------
df : pandas DataFrame
The augmented transaction table
item_df : pandas DataFrame
The item features as a transaction table
user_df : pandas DataFrame
The userfeatures as a transaction table
item_ids : list
All unique item ids
user_ids : list
All unique user ids
"""
df = | pd.read_parquet(aug_tt) | pandas.read_parquet |
import pandas as pd
import numpy as np
# import sklearn
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
#matplotlib
import matplotlib.pyplot as plt
# libraries used for train test split
import random
from math import floor
from types import SimpleNamespace
class keras_tools:
def __init__(self, data:pd.DataFrame, \
index = None,
features:list = [],
y_val = None,
ts_n_y_vals:int = None,
data_orientation:str = 'row',
debug:bool = False):
"""Setup the keras-rnn-tools helper class with passed variables
Args:
data (pd.DataFrame): base dataframe in pandas format.
index (str or int): if data_orientation='row' then index number or column name that should be used as the index of the resulting dataframe,
if data_orientation='column' then row index of row that should be used as index
features(list): if data_orientation='row' then list of integer indices or column names of the columns that should be used as features,
if data_orientation='column' then list of integer indices that should be used as features
y_val (str or pd.DataFrame, optional): target variable index or column name, only used for non-timeseries problems.
ts_n_y_vals (int): The number of y values to capture for each data set, only used for timeseries problems.
data_orientation (string): string specifying whether the data frame that is passed will need to be pivoted or not ('row' or 'column', row gets transposed for time-series problems)
debug (bool): indication of whether to output print values for debugging
"""
self.data = data
self.debug = debug
# check if y_val is populated
if y_val is not None:
self.y_val = y_val #TODO: add logic to split on y value
if isinstance(y_val, str):
print("passed y string")
elif isinstance(y_val, pd.DataFrame):
print("passed data frame")
# check if ts_n_y_vals is populated
elif ts_n_y_vals is not None:
self.ts_n_y_vals = ts_n_y_vals
if data_orientation == 'row':
if self.debug == True: print("Row-wise orientation")
# set index based on value passed
if index == None:
pass
elif isinstance(index, int):
self.data.index = self.data.iloc[:,index]
elif isinstance(index, str):
self.data.index = self.data[index]
else:
raise AttributeError(f"The index parameter passed ({index}) was not of type int or string")
if all(isinstance(n, int) for n in features):
print("all passed as integer")
self.data = self.data.iloc[:,features]
elif all(isinstance(n, str) for n in features):
print("all passed as str")
else:
raise AttributeError(f"The features {features} were not consistently of type int or string")
self.data = self.data.T
if self.debug == True: print(self.data)
elif data_orientation == 'column':
if self.debug == True: print("Column-wise orientation")
else:
raise AttributeError(f"Type {data_orientation} specified is not valid. Must be either 'column' or 'row'")
# if neither are populated then raise error
else:
raise AttributeError("Either y_val or ts_n_y_vals must be populated.")
# other variables used
self.scaler = "" # defined in scale()
# split df data
self.train_df = ""
self.test_df = ""
self.valid_df = ""
# transformed data as np
self.X_train = ""
self.X_test = ""
self.X_valid = ""
self.y_train = ""
self.y_test = ""
self.y_valid = ""
def _scale(self,
scaler = None,
output_scaler:bool = False):
"""Scale the data in the data set. Prescribe the same scaling to the test and validation data sets.
Args:
scaler (sklearn scaler or string, optional): optional scaling of data, passed as sklearn scaler or string of scaler type (minmax or standard).
output_scaler (bool, optional): Include the fit scaler in the output. Default is False.
"""
self.scaler = scaler
if isinstance(self.scaler, str):
if self.debug == True: print("scaler string")
if 'minmax' in self.scaler.lower():
self.scaler = MinMaxScaler()
elif 'standard' in self.scaler.lower():
self.scaler = StandardScaler()
else:
raise AttributeError("Invalid Scaler Type Passed (minmax or standard expected)")
elif self.scaler is not None:
if self.debug == True: print("scaler passed")
else:
if self.debug == True: print("no scaler passed")
raise AttributeError(f"Scaler type {scaler} was not sklearn scaler or string of ('minmax' or 'standard').")
print(f"running for size {self.train_df.iloc[:,:-int(self.ts_n_y_vals)].shape}")
# fit to training data
self.scaler.fit(self.train_df.iloc[:,:-int(self.ts_n_y_vals)].T)
# transform all the data in the data set
print(self.train_df)
self.train_df = pd.DataFrame(self.scaler.transform(self.train_df.T).T)
print(self.train_df)
# self.test_df = self.scaler.transform(self.test_df.T).T
# self.valid_df = self.scaler.transform(self.valid_df.T).T
if output_scaler: return self.scaler
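    # Note: the scaler above is fit only on the training window, excluding the final
    # ts_n_y_vals columns (the y horizon), which avoids leaking future values into the scaling.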
def _chunk_data (self, df, output_labels = True, **kwargs):
"""Helper to split data into x and y based on the previously split data
Args:
df (object): Indication of the type of split to perform. Must be one of 'sequential', 'overlap', or 'sample'
output_labels (bool, optional): indicator for whether y values also need to be outputted after chunking
**kwargs:
Returns:
"""
# reassign the dictionary variables to variables in namespace for easy access
n = SimpleNamespace(**kwargs)
np_arr_list, y_arr_list = [], []
end = df.shape[1]
# loop through each step and create a new np array to add to list
for chunk_start in range(0, (end - n.sample_size - n.y_size + 1), n.step):
# get a chunk of x values and store to array
if self.debug == True: print("From {} to {}".format(chunk_start, chunk_start + n.sample_size))
np_chunk = np.array(df.iloc[:,(chunk_start):(chunk_start + n.sample_size)])
# add stored array to list
np_arr_list.append(np_chunk)
if output_labels:
if self.debug == True: print("Y samples from {} to {}".format((chunk_start + n.sample_size), (chunk_start + n.sample_size + n.y_size)))
y_df_chunk = df.iloc[:,(chunk_start + n.sample_size):(chunk_start + n.sample_size + n.y_size)]
y_np_chunk = np.array(y_df_chunk)
y_arr_list.append(y_np_chunk)
# stack all the x samples together
np_stacked_chunks = np.stack(np_arr_list)
x_reshaped = np.transpose(np_stacked_chunks, (0,2,1))
if output_labels:
# stack all the y samples together
y_np_stacked_chunks = np.stack(y_arr_list)
y_reshaped = y_np_stacked_chunks
return x_reshaped, y_reshaped
else:
return x_reshaped
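    # Worked example of the chunking above (toy values, for illustration only):
    # a single series [1, 3, 5, 6, 7, 9] with sample_size=3, step=1, y_size=1 yields
    # x chunks [1,3,5], [3,5,6], [5,6,7] and matching y chunks [6], [7], [9].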
def train_test_split(self,
split_type:str = 'sample',
split_pct:float = 0.3,
val_split_pct:float = 0.1,
fill_na:bool = True,
return_df:bool = False):
"""Create the base train-test-validation split for time-series data
Args:
split_type (str): Indication of the type of split to perform. Must be one of 'sequential', 'overlap', or 'sample'
split_pct (bool):
val_split_pct (bool, optional):
fill_na (bool): Replace all NAs with 0's, typical prep. Default is True.
return_df (bool): Option to instead return the data as a dataframe (useful for debugging). Default is False.
Returns:
"""
#### basic parameter checking
if split_pct < 0 or split_pct > 1:
raise AttributeError(f"split_pct must be between 0 and 1. {split_pct} passed.")
if val_split_pct < 0 or val_split_pct > 1:
raise AttributeError(f"val_split_pct must be between 0 and 1. {val_split_pct} passed.")
if fill_na==True:
self.data.fillna(0, inplace=True)
#### create split depending on split_type
if split_type == 'sequential':
if self.debug == True: print("sequential split")
train_test_split_num = floor(self.data.shape[1] * (1 - split_pct - val_split_pct))
test_val_split = floor(self.data.shape[1] * (1 - val_split_pct))
if self.debug == True: print("Split at {} and {}".format(train_test_split_num, test_val_split))
# print(self.data, train_test_split_num)
self.train_df = np.array(self.data.iloc[:, 0:train_test_split_num])
self.test_df = np.array(self.data.iloc[:, train_test_split_num:test_val_split])
if self.debug: print(f"train_df: {self.train_df}")
if val_split_pct > 0 and val_split_pct < 1:
# create validation variables
x_val_start = test_val_split
x_val_end = self.data.shape[1] - self.ts_n_y_vals
self.valid_df = np.array(self.data.iloc[:, test_val_split:])
if return_df: return self.train_df, self.test_df, self.valid_df
else:
if return_df: return self.train_df, self.test_df
elif split_type == 'overlap':
if self.debug == True: print("overlap split")
train_test_split_num = floor((self.data.shape[1] - self.ts_n_y_vals) * (1 - split_pct - val_split_pct))
test_val_split = floor((self.data.shape[1] - self.ts_n_y_vals) * (1 - val_split_pct))
# self._split_dfs()
self.train_df = self.data.iloc[:, 0:(train_test_split_num + self.ts_n_y_vals)]
self.test_df = self.data.iloc[:, (train_test_split_num):(test_val_split + self.ts_n_y_vals)]
if val_split_pct > 0 and val_split_pct < 1:
# create validation variables
x_val_start = test_val_split
x_val_end = self.data.shape[1] - self.ts_n_y_vals
self.valid_df = self.data.iloc[:, test_val_split:]
if return_df: return self.train_df, self.test_df, self.valid_df
else:
if return_df: return self.train_df, self.test_df
elif split_type == 'sample':
if self.debug == True: print("sample split")
# try to split by y_val first, move on if it's not set
try:
X_train, X_test, y_train, y_test = train_test_split(self.data, self.y_val, test_size=split_pct)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_split_pct/(1-split_pct))
except AttributeError:
# for time-series this method only works if you want to sample specific features and keep the full time-series
# split out test_df then remove rows from train_df
self.test_df = self.data.loc[self.data.sample(frac=split_pct, replace=False).index]
self.train_df = self.data.loc[~self.data.index.isin(self.test_df.index)]
# split out valid_df then remove the rows from train_df
self.valid_df = self.train_df.loc[self.train_df.sample(frac=val_split_pct, replace=False).index]
self.train_df = self.train_df.loc[~self.train_df.index.isin(self.valid_df.index)]
if val_split_pct > 0 and val_split_pct < 1:
if return_df: return self.train_df, self.test_df, self.valid_df
else:
if return_df: return self.train_df, self.test_df
return self.X_train
else:
raise AttributeError(f"Type {split_type} specified is not valid")
if self.debug == True: print(self.data)
def reshape_ts(self,
step:int = 1,
sample_size:int = 1,
scaler = None,
output_scaler:bool = False):
"""Transforms split data into format needed for RNN, optionally can scale the data as well.
Args:
step (int): The number of steps before you take another sample (e.g. [1,3,5,6,7,9] and step of 2 would return x values of [[1,3][5,6][7,9]])
sample_size (int): The number of samples you want to take for each value (e.g. [1,3,5,6,7,9] and sample_size of 3 would return x values of [[1,3,5][3,5,6][5,6,7][6,7,9]])
input_data (tuple of object, optional): if train/test/validation data was not split using the class, data can be added directly here.
return_as_df (bool): Option to instead return the data as a dataframe (useful for debugging). Default is False.
scaler (sklearn scaler or string, optional): optional scaling of data, passed as sklearn scaler or string of scaler type (minmax or standard).
output_scaler (bool, optional): Include the fit scaler in the output. Default is False.
Returns:
scaler (object): If output_scaler==True then the fit scaler used will be returned.
"""
if scaler != None:
self._scale(scaler = scaler, output_scaler = output_scaler)
x_reshaped, y_reshaped = self._chunk_data(self.train_df, step = step, sample_size = sample_size, y_size = self.ts_n_y_vals)
# get test data
x_reshaped_test, y_reshaped_test = self._chunk_data(self.test_df, step = step, sample_size = sample_size, y_size = self.ts_n_y_vals)
self.X_train = x_reshaped
self.y_train = y_reshaped
self.X_test = x_reshaped_test
self.y_test = y_reshaped_test
if len(self.valid_df)>1:
# create val data sets
x_reshaped_val, y_reshaped_val = self._chunk_data(self.valid_df, step = step, sample_size = sample_size, y_size = self.ts_n_y_vals)
self.X_valid = x_reshaped_val
self.y_valid = y_reshaped_val
if output_scaler == True:
return self.scaler
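    # Shapes produced by reshape_ts (derived from _chunk_data): X_* is
    # (n_chunks, sample_size, n_series) after the transpose and y_* is
    # (n_chunks, n_series, ts_n_y_vals), so get_input_shape() below returns the
    # Keras-style (timesteps, features) pair.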
def get_input_shape(self):
return self.X_train.shape[1:3]
def unscale(self,
prediction_arr:object):
"""Given an array, unscales the data back to original numeric scale
Args:
prediction_arr (object): 2D array of variables to be unscaled (if array is 3D from predictions, use shape_predictions() first)
"""
pass
def predict_ts(self,
x_values,
y_values:object = None,
model:object = None,
predict_shape:str = '2d'):
"""Generates predictions from model for given x input
Args:
x_values ():
y_values (object): np.array of actuals if comparing values to actuals
model (object): object of Keras model, can be optional if a model has been passed in previous method
predict_shape (str): string indicator or '2d' or '3d' indicating how final layer of model is structured. See docs for more information.
Returns:
pred_list, actual_list (tuple of lists): Returns a list of predictions (returns a tuple of lists if y_values are provided for validation as well)
"""
predict_shape = predict_shape.lower()
pred_list = []
if y_values is not None: actual_list = y_values[:,:,0]
# predict future weeks
pred_list = self._train_iterative_preds(x_values, model)
# print('train predict {} and actual shape {}'.format(np.asarray(pred_list)[:,:,0].shape, np.asarray(actual_list).shape))
pred_list = self._reshape_pred_lists(np.asarray(pred_list), 'preds', date_first=True)
if y_values is not None:
actual_list = self._reshape_pred_lists(np.asarray(actual_list), 'actuals', date_first=True)
return pred_list, actual_list
else:
return pred_list
def _reshape_pred_lists (self, pred_list, column_name, date_first=True):
"""Generates predictions from model for given x input
Args:
pred_list (np.array): 2D or 3D array of predictions
column_name (str): name of the column that the prediction corresponds to
date_first(bool): Boolean specifying where the position of the date is in the passed array
"""
# reshape data
pred_list = np.asarray(pred_list)
pred_list = np.stack(pred_list)
if pred_list.ndim == 3:
pred_list = pd.DataFrame(pred_list[0,:,:])
else:
pred_list = | pd.DataFrame(pred_list) | pandas.DataFrame |
import pandas as pd
import utils
import equation_utils
from pumpsettings import PumpSettings
from sklearn.metrics import median_absolute_error, r2_score, mean_squared_error
def compute_statistics(y_true, y_predicted, k):
    # Returns MAE, R^2, adjusted R^2, and RMSE values
mae = round(median_absolute_error(y_true, y_predicted), 2)
r_squared = round(r2_score(y_true, y_predicted), 3)
adjusted_r_2 = round(utils.adjusted_r_2(r_squared, len(y_predicted), k), 3)
rmse = round(mean_squared_error(y_true, y_predicted) ** 0.5, 2)
return (mae, r_squared, adjusted_r_2, rmse)
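# Note: utils.adjusted_r_2 is assumed to implement the standard correction
#   adj_R2 = 1 - (1 - R2) * (n - 1) / (n - k - 1)
# with n = len(y_predicted) and k = the number of predictors passed in above.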
def run_equation_testing(
x_input_file_name,
y_input_file_name,
jaeb_equations,
traditional_fitted_equations,
traditional_constant_equations,
):
"""
Run equation testing, using the equations from an equation dict
input_file_name: name of file, without extension
jaeb_equations: PumpSettings object with equation data
traditional_equations: PumpSettings object with equation data
"""
x_data_path = utils.find_full_path(x_input_file_name, ".csv")
x_df = pd.read_csv(x_data_path)
y_data_path = utils.find_full_path(y_input_file_name, ".csv")
y_df = pd.read_csv(y_data_path)
df = | pd.concat([x_df, y_df], axis=1) | pandas.concat |
# ms_mint/peaklists.py
import os
import pandas as pd
import numpy as np
from .standards import PEAKLIST_COLUMNS, DEPRECATED_LABELS
from .helpers import dataframe_difference
from .tools import get_mz_mean_from_formulas
def read_peaklists(filenames, ms_mode='negative'):
'''
Extracts peak data from csv files that contain peak definitions.
CSV files must contain columns:
- 'peak_label': str, unique identifier
- 'mz_mean': float, center of mass to be extracted in [Da]
        - 'mz_width': float, width of the mass window in [ppm]
- 'rt_min': float, minimum retention time in [min]
- 'rt_max': float, maximum retention time in [min]
-----
Args:
- filenames: str or PosixPath or list of such with path to csv-file(s)
Returns:
pandas.DataFrame in peaklist format
'''
if isinstance(filenames, str):
filenames = [filenames]
peaklist = []
for fn in filenames:
if fn.endswith('.csv'):
df = | pd.read_csv(fn) | pandas.read_csv |
from pathlib import Path
import sys
import numpy as np
import pandas as pd
import collections
import deepdish as dd
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import matplotlib.ticker as ticker
import seaborn as sns
import pyomo.environ as pyo
import pyomo.dae as pyodae
# sys.path.append(r"G:\\Courses_UB\\Spring_19\\Optimal Controls\\Project\\casadi-windows-py38-v3.5.5-64bit")
# from casadi import *
from models.fixed_magnet_model import one_fixed_magnet
from models.two_moving_magnets_model import trajectory_optimization, optimize_tf_and_vel, user_provided_magnet_separation, maximize_vel_minimize_impulse, maximize_vel_minimize_time_impulse
from models.vsm_plots import plot_disp_vel, plot_settings_together, plot_pareto_front
from models.linear_robot_model import (robot_linear_dynamics, robot_linear_dynamics_optimize_tf, robot_linear_dynamics_optimize_tf_and_alpha)
from models.pyomo_optimizer import pyomo_solver
from data.pyomo_parse_data import extract_results, extract_results_for_optimal_tf
from utils import skip_run, parse_time
import yaml
import io
import csv
import matplotlib.pylab as pylab
from dateutil import parser
params = {'legend.fontsize': 12,
# 'figure.figsize': (10, 5),
'axes.labelsize': 16,
# 'axes.titlesize':'x-large',
'xtick.labelsize':14,
'ytick.labelsize':14}
pylab.rcParams.update(params)
# Load the configurations from the yml file
config = yaml.safe_load(io.open('src/config.yml'))
##############################################################
# Optimal velocity trajectory for hammering task (given Tf)
##############################################################
# --------- ICRA extension to IROS --------#
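# Note: skip_run (imported from utils) appears to gate each experiment block --
# blocks opened with skip_run('run', name) execute, while skip_run('skip', name)
# bypasses the body, so individual experiments can be toggled without commenting code out.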
with skip_run('run', 'optimize_vel_given_tf') as check, check():
Tf = [0.3, 2.0]# [0.3, 0.5, 1.0, 1.5, 2.0]
Tf_folder = ['tf_03', 'tf_20']#['tf_05', 'tf_10', 'tf_15', 'tf_20']
counter = 0
for tf in Tf:
for setting in config['stiff_config']:
if (setting == 'high_stiffness'):
pyo_model = robot_linear_dynamics(tf, config)
else:
pyo_model = trajectory_optimization(tf, setting, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results(solved_model, setting, config)
# save the csv files
filepath = str(Path(__file__).parents[1] / config['csv_path']/ Tf_folder[counter] / setting) + '.csv'
data.to_csv(filepath, index=False)
plot_disp_vel(data, setting, config)
counter += 1
with skip_run('skip', 'plot_optimal_results_of_hammer_given_tf') as check, check():
Tf_folder = ['tf_03', 'tf_20'] #['tf_05', 'tf_10', 'tf_15', 'tf_20']
plot_magnet_disp = False
for folder_name in Tf_folder:
if plot_magnet_disp:
fig, ax = plt.subplots(3,1)
else:
fig, ax = plt.subplots(2,1)
if folder_name == 'tf_03' or folder_name == 'tf_05':
ylim = [-0.1, 0.1]
ax[0].set_ylabel('Displacement (m)')
ax[1].set_ylabel('Velocity (m/s)')
if plot_magnet_disp:
ax[2].set_ylabel('magnet (mm)')
else:
ylim = [-0.2, 0.1]
for setting in config['stiff_config']:
# load the data
filepath = str(Path(__file__).parents[1] / config['csv_path']/ folder_name / setting) + '.csv'
data = pd.read_csv(filepath, delimiter=',')
tf = data['time'].iloc[-1]
w = 0.03
if setting == 'high_stiffness':
linewidth = 2
else :
linewidth = 1.5
ax[0].plot(data['time'], data['bd']+data['hd'],config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[0].plot(data['time'], 0.05 + 0 * data['time'], 'k:')
ax[0].plot([0.8 * tf, 0.8 * tf], [-0.2, 0.1], 'k-.')
ax[0].grid()
ax[0].set_xlim([0, tf])
ax[0].set_ylim(ylim)
ax[0].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax[1].plot(data['time'], data['bv']+data['hv'],config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[1].plot([0.8 * tf, 0.8 * tf], [-1, 1.25], 'k-.')
ax[1].grid()
ax[1].set_xlim([0, tf])
ax[1].set_ylim([-1, 1.25])
ax[1].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
if plot_magnet_disp:
ax[2].plot(data['time'], (data['md1'] + w) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[2].plot(data['time'], (data['md2'] - w) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[2].plot([0.8 * tf, 0.8 * tf], [-.040 * 1000, .040 * 1000], 'k-.')
ax[2].grid()
ax[2].set_xlim([0, tf])
ax[2].set_ylim([-.040 * 1000, .040 * 1000])
ax[2].set_xlabel('Time (s)')
# ax[2].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
for i in range(2):
ax[i].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
else:
ax[1].set_xlabel('Time (s)')
ax[0].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax[0].legend(loc='lower right')
plt.tight_layout()
plt.subplots_adjust(hspace=0.10, wspace=0.0)
with skip_run('skip', 'plot_optimal_results_of_robot_given_tf') as check, check():
Tf_folder = ['tf_03', 'tf_20'] #['tf_05', 'tf_10', 'tf_15', 'tf_20']
plot_magnet_disp = False
k = 0 # counter for plot axis
fig, ax = plt.subplots(2,2, figsize=(10,5))
for folder_name in Tf_folder:
if folder_name == 'tf_03' or folder_name == 'tf_05':
ylim = [-0.2, 0.1] #[-0.02, 0.08]
ax[0,k].set_ylabel('Displacement (m)')
ax[1,k].set_ylabel('magnet (mm)')
else:
ylim = [-0.2, 0.1]
for setting in config['stiff_config']:
# load the data
filepath = str(Path(__file__).parents[1] / config['csv_path']/ folder_name / setting) + '.csv'
data = pd.read_csv(filepath, delimiter=',')
tf = data['time'].iloc[-1]
w = 0.03
# if setting == 'high_stiffness':
# linewidth = 2
# else :
linewidth = 2
ax[0,k].plot(data['time'], data['bd'],config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[0,k].plot(data['time'], 0.05 + 0 * data['time'], 'k:')
ax[0,k].plot([0.8 * tf, 0.8 * tf], [-0.2, 0.1], 'k-.')
ax[0,k].grid()
ax[0,k].set_xlim([0, tf])
ax[0,k].set_ylim(ylim)
ax[0,k].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# ax[0,k].xaxis.set_minor_locator(ticker.MultipleLocator(0.2))
# ax[0,k].xaxis.set_major_locator(ticker.MultipleLocator(1))
ax[1,k].plot(data['time'], (data['md1'] + 0.03) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[1,k].plot(data['time'], (data['md2'] - 0.03) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[1,k].plot([0.8 * tf, 0.8 * tf], [-.035 * 1000, .035 * 1000], 'k-.')
ax[1,k].grid()
ax[1,k].set_xlim([0, tf])
ax[1,k].set_ylim([-.035 * 1000, .035 * 1000])
ax[1,k].set_xlabel('Time (s)')
ax[1,k].xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax[0,k].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax[0,k].legend(loc='lower right')
plt.tight_layout()
plt.subplots_adjust(hspace=0.10, wspace=0.1)
k += 1
# remove the yticks for the second plot
for j in range(0,2):
ax[j,1].tick_params(axis='y',
which='both',
left=False,
# top=False, # ticks along the top edge are off
labelleft=False)
ax[j,0].set_xticks(np.arange(0, 0.3 + 0.1, step=0.1))
ax[j,1].set_xticks(np.arange(0, 2.0 + 0.5, step=0.5))
with skip_run('skip', 'plot_energy_stored') as check, check():
Tf_folder = ['tf_03', 'tf_20', 'optimal_tf'] #['tf_05', 'tf_10', 'tf_15', 'tf_20']
for folder_name in Tf_folder:
fig, ax = plt.subplots(2,1)
ax[0].set_ylabel('Power (W)')
        ax[1].set_ylabel('Cumulative Energy (J)')
for setting in config['stiff_config']:
# load the data
filepath = str(Path(__file__).parents[1] / config['csv_path']/ folder_name / setting) + '.csv'
data = pd.read_csv(filepath, delimiter=',')
if folder_name == 'optimal_tf':
tf = 2.0
else:
tf = data['time'].iloc[-1]
w = 0.03
linewidth = 1.5
Sep1 = data['hd'] - data['md1'] - w
Sep2 = data['md2'] - data['hd'] - w
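            # Exponential magnet-force model, F = C1*(exp(-C2*Sep2) - exp(-C2*Sep1)),
            # where Sep1/Sep2 are the air gaps computed above and C1, C2 (from the config)
            # are presumably the fitted magnet constants.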
# calculate the power stored in the magnets
force = config['C1'] * (np.exp(-config['C2'] * Sep2) - np.exp(-config['C2'] * Sep1))
power = np.multiply(force, data['hv'])
            # calculate the cumulative energy
energy = np.zeros(power.shape)
for count, _ in enumerate(power):
energy[count] = np.trapz(abs(power[:count]), data['time'][:count])
ax[0].plot(data['time'], power, config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[0].grid()
ax[0].set_xlim([0, tf])
ax[0].set_ylim([np.min(power), np.max(power)])
ax[0].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax[1].plot(data['time'], energy, config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
if folder_name != 'optimal_tf':
ax[1].plot([0.8 * tf, 0.8 * tf], [0, np.max(energy)], 'k-.')
ax[1].grid()
ax[1].set_xlim([0, tf])
ax[1].set_ylim([0, np.max(energy)])
ax[1].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax[1].set_xlabel('Time (s)')
ax[1].legend(loc='upper left')
plt.tight_layout()
plt.subplots_adjust(hspace=0.10, wspace=0.0)
# Time sweep optimal solution
with skip_run('skip', 'trajectory_optimization_time_sweep') as check, check():
Tf = np.around(np.arange(0.25, 5.0, 0.25), decimals=2)
sweep_data = collections.defaultdict()
max_velocity = collections.defaultdict()
fig = plt.plot()
for tf in Tf:
Data = collections.defaultdict()
list_vel = []
for setting in config['stiff_config']:
if (setting == 'high_stiffness'):
pyo_model = robot_linear_dynamics(tf, config)
else:
pyo_model = trajectory_optimization(tf, setting, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results(solved_model, setting, config)
Data[setting] = data.to_dict()
# optimal velocity value at alpha * tf
time = data['time']
vel = data['hv'] + data['bv']
optimal_time = time[-1:]
optimal_vel = max(vel)
if setting == 'low_stiffness':
plt.plot(tf, optimal_vel, 'rD')
elif setting == 'high_stiffness':
plt.plot(tf, optimal_vel, 'bD')
else:
plt.plot(tf, optimal_vel, 'kD')
list_vel.append(optimal_vel)
sweep_data[tf] = Data
max_velocity[tf] = vel
# save the complete dictionary
filepath = str(Path(__file__).parents[1] / config['two_moving_mags_time_sweep_data'])
dd.io.save(filepath, sweep_data)
# save the maximum velocity for each tf
filepath = str(Path(__file__).parents[1] / config['max_vel_time_sweep'])
dd.io.save(filepath, max_velocity)
with skip_run('skip', 'load_the_saved_tf_sweep_data') as check, check():
# load the data
filepath = str(Path(__file__).parents[1] / config['two_moving_mags_time_sweep_data'])
sweep_data = dd.io.load(filepath)
Tf = np.around(np.arange(0.5, 5.0, 0.25), decimals=2)
# sns.set()
_,ax1 = plt.subplots(2,1)
_,ax2 = plt.subplots(2,1)
_,ax3 = plt.subplots(2,1)
ax1[0].set_xlim([0, 5.0])
ax1[1].set_xlim([0, 5.0])
ax2[0].set_xlim([0, 5.0])
ax2[1].set_xlim([0, 5.0])
for tf in Tf:
data = sweep_data[tf]
time_vs = [i for i in data['variable_stiffness']['time'].values()]
base_vs = [i for i in data['variable_stiffness']['bv'].values()]
vel_vs = [i+j for i,j in zip(data['variable_stiffness']['hv'].values(), data['variable_stiffness']['bv'].values())]
disp_vs = [i+j for i,j in zip(data['variable_stiffness']['hd'].values(), data['variable_stiffness']['bd'].values())]
time_ls = [i for i in data['low_stiffness']['time'].values()]
base_ls = [i for i in data['low_stiffness']['bv'].values()]
vel_ls = [i+j for i,j in zip(data['low_stiffness']['hv'].values(), data['low_stiffness']['bv'].values())]
disp_ls = [i+j for i,j in zip(data['low_stiffness']['hd'].values(), data['low_stiffness']['bd'].values())]
# data_vs = np.array([time_vs, vel_vs]).reshape(time_vs.shape[0],2)
# data_ls = np.array([time_vs, vel_vs]).reshape(time_vs.shape[0],2)
ax1[0].plot(time_ls, vel_ls)
ax1[1].plot(time_vs, vel_vs)
ax2[0].plot(time_ls, disp_ls)
ax2[1].plot(time_vs, disp_vs)
ax3[0].plot(tf, max(base_ls), 'ko')
ax3[0].plot(tf, max(vel_ls), 'rd')
ax3[1].plot(tf, max(base_vs), 'ko')
ax3[1].plot(tf, max(vel_vs), 'bd')
# data = data['variable_stiffness']
# sns.relplot(x="time", y="hv", kind="line", data=data)
ax1[0].set_ylim([-1.0, 1.0])
ax1[1].set_ylim([-1.0, 1.0])
##############################################################
# Time optimal hammering task for two moving magnets
##############################################################
# --------- Used for IROS paper ---------- #
# Time optimal trajectory evaluation - 3 Weights
with skip_run('skip', 'optimize_Tf_and_Vel') as check, check():
Data = collections.defaultdict()
counter = 0
exps = ['optimal_tf'] #['weights_1', 'weights_2', 'weights_3']
# weights for multi-objective optimization
weights = [[0.5,0.5]] #[[0.9, 0.1], [0.5, 0.5], [0.1, 0.9]]
for weight in weights:
for setting in config['stiff_config']:
# if (setting == 'high_stiffness'):
# pyo_model= robot_linear_dynamics_optimize_tf(weight, config)
# else:
pyo_model = optimize_tf_and_vel( setting, weight, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results_for_optimal_tf(solved_model, setting, config)
print("Tf value for " + setting + " is: " + str(tf))
plot_disp_vel(data, 'Both_magnets_moving', config)
plt.suptitle('Tf: ' + str(tf) + ', Stiffness: ' + setting)
# save the csv files
filepath = str(Path(__file__).parents[1] / config['csv_path']/ exps[counter] / setting) + '.csv'
data.to_csv(filepath, index=False)
Data[setting] = data.to_dict()
counter += 1
plot_settings_together(Data, config)
# save the optimal data
# filepath = str(Path(__file__).parents[1] / config['two_moving_mags_optim_tf_data'])
# dd.io.save(filepath, Data)
with skip_run('skip', 'plot_the_optimal_values_LS_VS') as check, check():
exps = ['optimal_tf'] #['weights_1', 'weights_2', 'weights_3']
counter = 0
plot_magnet_disp = False
for i in exps:
if plot_magnet_disp:
fig, ax = plt.subplots(3,1)
else:
fig, ax = plt.subplots(2,1)
# plt.tight_layout()
# fig.subplots_adjust(hspace=0.10, wspace=2.25)
filepath1 = str(Path(__file__).parents[1] / config['csv_path'] / i / 'low_stiffness') + '.csv'
data1 = pd.read_csv(filepath1, delimiter=',')
filepath2 = str(Path(__file__).parents[1] / config['csv_path'] / i / 'variable_stiffness') + '.csv'
data2 = pd.read_csv(filepath2, delimiter=',')
filepath3 = str(Path(__file__).parents[1] / config['csv_path'] / i / 'high_stiffness') + '.csv'
data3 = pd.read_csv(filepath3, delimiter=',')
ax[0].plot(data1['time'], data1['hd'] + data1['bd'], 'b-', label='LS', linewidth=1.5)
ax[0].plot(data2['time'], data2['hd'] + data2['bd'], 'r-' , label='VS', linewidth=1.5)
ax[0].plot(data3['time'], data3['hd'] + data3['bd'], 'g-', label='HS', linewidth=2)
ax[0].plot(data1['time'], 0.05 + 0 * data1['time'], 'k:')
ax[0].plot([0.8 * data1['time'].iloc[-1], 0.8 * data1['time'].iloc[-1]], [-0.2, 0.1], 'k-.')
ax[0].plot([0.8 * data2['time'].iloc[-1], 0.8 * data2['time'].iloc[-1]], [-0.2, 0.1], 'k-.')
ax[0].plot([0.8 * data3['time'].iloc[-1], 0.8 * data3['time'].iloc[-1]], [-0.2, 0.1], 'k-.')
# ax[0].plot([max(data1['time']), max(data1['time'])], [-0.2, 0.1], 'b:')
# ax[0].plot([max(data2['time']), max(data2['time'])], [-0.2, 0.1], 'r:')
# ax[0].plot([max(data3['time']), max(data3['time'])], [-0.2, 0.1], 'g:')
ax[0].set_xlim([0, max(max(data1['time']), max(data2['time']))])
ax[0].set_ylim([-0.2, 0.1])
ax[0].legend(loc='lower right')
ax[0].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax[1].plot(data1['time'], data1['hv'] + data1['bv'], 'b-', label='LS', linewidth=1.5)
ax[1].plot(data2['time'], data2['hv'] + data2['bv'], 'r-', label='VS', linewidth=1.5)
ax[1].plot(data3['time'], data3['hv'] + data3['bv'], 'g-', label='HS', linewidth=1.5)
ax[1].plot([0.8 * data1['time'].iloc[-1], 0.8 * data1['time'].iloc[-1]], [-1.2, 1.2], 'k-.')
ax[1].plot([0.8 * data2['time'].iloc[-1], 0.8 * data2['time'].iloc[-1]], [-1.2, 1.2], 'k-.')
ax[1].plot([0.8 * data3['time'].iloc[-1], 0.8 * data3['time'].iloc[-1]], [-1.2, 1.2], 'k-.')
# ax[1].plot([max(data1['time']), max(data1['time'])], [-1.2, 1.2], 'b:')
# ax[1].plot([max(data2['time']), max(data2['time'])], [-1.2, 1.2], 'r:')
# ax[1].plot([max(data3['time']), max(data3['time'])], [-1.2, 1.2], 'g:')
# ax[1].plot(data1['time'], 0.5 + 0 * data1['time'], 'k--')
# ax[1].plot(data1['time'], -0.5 + 0 * data1['time'], 'k--')
ax[1].set_xlim([0, max(max(data1['time']), max(data2['time'])) ])
ax[1].set_ylim([-1.2, 1.2])
ax[1].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
if plot_magnet_disp:
ax[2].plot(data1['time'], 1000 * ( data1['md2'] - 0.03), 'b-', linewidth=1.5)
ax[2].plot(data1['time'], 1000 * (-data1['md2'] + 0.03), 'b-', linewidth=1.5)
ax[2].plot(data2['time'], 1000 * ( data2['md2'] - 0.03), 'r-', linewidth=1.5)
ax[2].plot(data2['time'], 1000 * (-data2['md2'] + 0.03), 'r-', linewidth=1.5)
ax[2].plot(data3['time'], 1000 * ( data3['md2'] - 0.03), 'g-', linewidth=1.5)
ax[2].plot(data3['time'], 1000 * (-data3['md2'] + 0.03), 'g-', linewidth=1.5)
# ax[2].plot([max(data1['time']), max(data1['time'])], [-0.06, 0.06], 'b--')
# ax[2].plot([max(data2['time']), max(data2['time'])], [-0.06, 0.06], 'r--')
ax[2].set_xlim([0, max(max(data1['time']), max(data2['time'])) ])
ax[2].set_xlabel('Time (s)')
ax[2].plot([0.8 * data1['time'].iloc[-1], 0.8 * data1['time'].iloc[-1]], [-1000 * 0.045, 1000 * 0.045], 'k-.')
ax[2].plot([0.8 * data2['time'].iloc[-1], 0.8 * data2['time'].iloc[-1]], [-1000 * 0.045, 1000 * 0.045], 'k-.')
ax[2].plot([0.8 * data3['time'].iloc[-1], 0.8 * data3['time'].iloc[-1]], [-1000 * 0.045, 1000 * 0.045], 'k-.')
for i in range(2):
ax[i].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
else:
ax[1].set_xlabel('Time (s)')
ax[0].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
if counter < 1:
ax[0].set_ylabel('Displacement (m)')
ax[1].set_ylabel('Velocity (m/s)')
if plot_magnet_disp:
ax[2].set_ylabel('Magnet (m)')
ax[2].grid()
# if counter == 1:
# _, ax1 = plt.subplots(2,1)
# ax1[0].plot(data1['time'], data1['bd'], 'k-.')
# ax1[0].plot(data2['time'], data2['bd'], 'k-.')
# ax1[0].plot(data1['time'], data1['bd'] + data1['hd'], 'b-')
# ax1[0].plot(data2['time'], data2['bd'] + data2['hd'], 'r-')
# ax1[1].plot(data1['time'], data1['bv'], 'k-.')
# ax1[1].plot(data2['time'], data2['bv'], 'k-.')
# ax1[1].plot(data1['time'], data1['bv'] + data1['hv'], 'b-')
# ax1[1].plot(data2['time'], data2['bv'] + data2['hv'], 'r-')
counter += 1
plt.tight_layout()
plt.subplots_adjust(hspace=0.10, wspace=2.25)
ax[0].grid()
ax[1].grid()
# Time optimal trajectory evaluation - All weights for pareto front
with skip_run('skip', 'optimize_tf_and_vel_all_weights') as check, check():
Data = collections.defaultdict()
Pareto = collections.defaultdict()
counter = 0
# weights for multi-objective optimization
weights = [[0.9, 0.1], [0.8, 0.2], [0.7, 0.3], [0.6, 0.4], [0.5, 0.5], [0.4, 0.6], [0.3, 0.7], [0.2, 0.8], [0.1, 0.9]]
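    # Weighted-sum scalarization: each weight pair presumably trades off the two objectives
    # (final time vs. hammer velocity at impact); the (tf, velocity) points collected below
    # trace out the Pareto front that is plotted in the next block.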
for weight in weights:
temp = collections.defaultdict()
for setting in config['stiff_config']:
if (setting == 'high_stiffness'):
pyo_model= robot_linear_dynamics_optimize_tf(weight, config)
else:
pyo_model = optimize_tf_and_vel( setting, weight, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results_for_optimal_tf(solved_model, setting, config)
print("Tf value for " + setting + " is: " + str(tf))
# save the csv files
filepath = str(Path(__file__).parents[1] / config['pareto_path'] / str(counter) / setting) + '.csv'
data.to_csv(filepath, index=False)
temp_time = data['time']
temp['t_hit'] = temp_time[temp_time == 0.8*tf]
temp['tf'] = tf
temp[setting] = data['bv'][temp_time == 0.8*tf] + data['hv'][temp_time == 0.8*tf]
counter += 1
Pareto[counter] = temp
# path to save the data
filepath = str(Path(__file__).parents[1] / config['pareto_front'])
dd.io.save(filepath, Pareto)
with skip_run('skip', 'plot_the_pareto_front') as check, check():
# path to save the data
filepath = str(Path(__file__).parents[1] / config['pareto_front'])
pareto_data = dd.io.load(filepath)
plot_pareto_front(pareto_data, config)
# Optimal solutions for constant separations
with skip_run('skip', 'data_for_two_magnets_sep_sweep') as check, check():
tf = 1.0
Separations = np.around(np.arange(0.035, 0.06, 0.01), decimals=3)
df = pd.DataFrame()
for sep in Separations:
pyo_model = user_provided_magnet_separation(tf, sep, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results(solved_model, '', config)
temp = pd.DataFrame({'sep': ["sep_" + str(round(i,3)) for i in data['md2']]})
data = data.join(temp, lsuffix='_caller', rsuffix='_other')
tol_vel = pd.DataFrame({'total_vel' : data['bv'] + data['hv']})
data = data.join(tol_vel, lsuffix='_caller', rsuffix='_other')
tol_disp= pd.DataFrame({'total_disp' : data['bd'] + data['hd']})
data = data.join(tol_disp, lsuffix='_caller', rsuffix='_other')
if df.size > 0:
df = df.append(data, ignore_index=True)
else:
df = data
# save the data
filepath = str(Path(__file__).parents[1] / config['two_moving_mags_sep_sweep_data'])
df.to_hdf(filepath, key='df', mode='w')
with skip_run('skip', 'plot_sep_sweep_data') as check, check():
# load the data
filepath = str(Path(__file__).parents[1] / config['two_moving_mags_sep_sweep_data'])
df = pd.read_hdf(filepath, key='df')
fig,ax = plt.subplots(2,1)
sns_palette = sns.color_palette("viridis", n_colors=3, desat=0.5)
# sns_palette = sns.cubehelix_palette(5, start=2, rot=-.75)
g1 = sns.relplot(x="time", y="total_vel", hue="sep", kind="line", palette=sns_palette, data=df, ax=ax[0])
g2 = sns.relplot(x="time", y="total_disp", hue="sep", kind="line", palette=sns_palette, data=df, ax=ax[1])
# plt.close(g1.fig)
# plt.close(g2.fig)
plt.tight_layout()
# Optimal solution for given Tf
with skip_run('skip', 'run_trajectory_optimization_of_the_robot_given_tf') as check, check():
pyo_model = robot_linear_dynamics(0.5, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
    data, _ , tf = extract_results(solved_model, '', config)
_,ax = plt.subplots(2,1)
ax[0].plot(data['time'], data['bd'], 'b-', label = '0.5 s')
ax[0].plot([0.5, 0.5], [-0.1, 0.08], 'k--')
ax[0].plot([3, 3], [-0.1, 0.08], 'k--')
ax[1].plot(data['time'], data['bv'], 'b-', label = '0.5 s')
ax[1].plot([0.5, 0.5], [-0.5, 0.5], 'k--')
ax[1].plot([3, 3], [-0.5, 0.5], 'k--')
pyo_model = robot_linear_dynamics(3, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
    data, _ , tf = extract_results(solved_model, '', config)
ax[0].plot(data['time'], data['bd'], 'r-', label = '3.0 s')
ax[0].set_ylabel('Displacement (m)')
ax[0].grid()
ax[0].legend(loc='upper center')
ax[1].plot(data['time'], data['bv'], 'r-', label = '3.0 s')
# ax[1].plot(data['time'], 0.5 + 0*data['time'], 'k-')
# ax[1].plot(data['time'], -0.5 + 0*data['time'], 'k-')
ax[1].set_xlabel('Time (s)')
ax[1].set_ylabel('Velocity (m/s)')
ax[1].grid()
plt.tight_layout()
with skip_run('skip', 'run_trajectory_optimization_of_the_robot_for_tf') as check, check():
alphas = np.round(np.arange(0, 1.0, 0.1), decimals=1)
weights = [0.5, 0.5]
for alpha in alphas:
        pyo_model = robot_linear_dynamics_optimize_tf(weights, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results_for_optimal_tf(solved_model, setting, config)
_,ax = plt.subplots(3,1)
ax[0].plot(data['t'], data['bd'])
ax[0].set_xlabel('Time (s)')
ax[0].set_ylabel('Displacement (m)')
ax[1].plot(data['t'], data['bv'])
ax[1].set_xlabel('Time (s)')
ax[1].set_ylabel('Velocity (m/s)')
ax[2].plot(data['t'], data['ba'])
ax[2].set_xlabel('Time (s)')
ax[2].set_ylabel('Acceleration (m/s)')
#TODO:
# plot the pareto front for optimal values corresponding to time and velocity of the robot
##############################################################
# Impulse and velocity optimization
##############################################################
# --------- Extension of IROS paper ---------- #
# Optimal velocity trajectory given Tf
with skip_run('skip', 'optimize_vel_given_tf_using_impulse') as check, check():
Tf = [0.3, 2.0]# [0.3, 0.5, 1.0, 1.5, 2.0]
Tf_folder = ['tf_03', 'tf_20']#['tf_05', 'tf_10', 'tf_15', 'tf_20']
counter = 0
for tf in Tf:
for setting in config['stiff_config']:
print('Tf: {}, {}'.format(tf, setting))
if (setting == 'high_stiffness'):
pyo_model = robot_linear_dynamics(tf, config)
else:
pyo_model = maximize_vel_minimize_impulse(tf, setting, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results(solved_model, setting, config)
# save the csv files
filepath = str(Path(__file__).parents[1] / config['csv_path']/ 'impulse' / Tf_folder[counter] / setting) + '.csv'
data.to_csv(filepath, index=False)
plot_disp_vel(data, setting, config)
counter += 1
with skip_run('skip', 'plot_optimal_results_of_hammer_given_tf') as check, check():
Tf_folder = ['tf_03', 'tf_20'] #['tf_05', 'tf_10', 'tf_15', 'tf_20']
plot_magnet_disp = False
for folder_name in Tf_folder:
if plot_magnet_disp:
fig, ax = plt.subplots(3,1)
else:
fig, ax = plt.subplots(2,1)
if folder_name == 'tf_03' or folder_name == 'tf_05':
ylim = [-0.1, 0.1]
ax[0].set_ylabel('Displacement (m)')
ax[1].set_ylabel('Velocity (m/s)')
if plot_magnet_disp:
ax[2].set_ylabel('magnet (mm)')
else:
ylim = [-0.2, 0.1]
for setting in config['stiff_config']:
# load the data
filepath = str(Path(__file__).parents[1] / config['csv_path']/ folder_name / setting) + '.csv'
data = pd.read_csv(filepath, delimiter=',')
tf = data['time'].iloc[-1]
w = 0.03
if setting == 'high_stiffness':
linewidth = 2
else :
linewidth = 1.5
ax[0].plot(data['time'], data['bd']+data['hd'],config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[0].plot(data['time'], 0.05 + 0 * data['time'], 'k:')
ax[0].plot([0.8 * tf, 0.8 * tf], [-0.2, 0.1], 'k-.')
ax[0].grid()
ax[0].set_xlim([0, tf])
ax[0].set_ylim(ylim)
ax[0].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax[1].plot(data['time'], data['bv']+data['hv'],config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[1].plot([0.8 * tf, 0.8 * tf], [-1, 1.25], 'k-.')
ax[1].grid()
ax[1].set_xlim([0, tf])
ax[1].set_ylim([-1, 1.25])
ax[1].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
if plot_magnet_disp:
ax[2].plot(data['time'], (data['md1'] + w) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[2].plot(data['time'], (data['md2'] - w) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[2].plot([0.8 * tf, 0.8 * tf], [-.040 * 1000, .040 * 1000], 'k-.')
ax[2].grid()
ax[2].set_xlim([0, tf])
ax[2].set_ylim([-.040 * 1000, .040 * 1000])
ax[2].set_xlabel('Time (s)')
# ax[2].yaxis.set_major_formatter(FormatStrFormatter('%.3f'))
for i in range(2):
ax[i].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
else:
ax[1].set_xlabel('Time (s)')
ax[0].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax[0].legend(loc='lower right')
plt.tight_layout()
plt.subplots_adjust(hspace=0.10, wspace=0.0)
with skip_run('skip', 'plot_optimal_results_of_robot_given_tf') as check, check():
Tf_folder = ['tf_03', 'tf_20'] #['tf_05', 'tf_10', 'tf_15', 'tf_20']
plot_magnet_disp = False
k = 0 # counter for plot axis
fig, ax = plt.subplots(2,2, figsize=(10,5))
for folder_name in Tf_folder:
if folder_name == 'tf_03' or folder_name == 'tf_05':
ylim = [-0.2, 0.1] #[-0.02, 0.08]
ax[0,k].set_ylabel('Displacement (m)')
ax[1,k].set_ylabel('magnet (mm)')
else:
ylim = [-0.2, 0.1]
for setting in config['stiff_config']:
# load the data
filepath = str(Path(__file__).parents[1] / config['csv_path']/ folder_name / setting) + '.csv'
data = pd.read_csv(filepath, delimiter=',')
tf = data['time'].iloc[-1]
w = 0.03
# if setting == 'high_stiffness':
# linewidth = 2
# else :
linewidth = 2
ax[0,k].plot(data['time'], data['bd'],config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[0,k].plot(data['time'], 0.05 + 0 * data['time'], 'k:')
ax[0,k].plot([0.8 * tf, 0.8 * tf], [-0.2, 0.1], 'k-.')
ax[0,k].grid()
ax[0,k].set_xlim([0, tf])
ax[0,k].set_ylim(ylim)
ax[0,k].yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# ax[0,k].xaxis.set_minor_locator(ticker.MultipleLocator(0.2))
# ax[0,k].xaxis.set_major_locator(ticker.MultipleLocator(1))
ax[1,k].plot(data['time'], (data['md1'] + 0.03) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[1,k].plot(data['time'], (data['md2'] - 0.03) * 1000,config[setting]['plot_style'], label=config[setting]['plot_label'], linewidth=linewidth)
ax[1,k].plot([0.8 * tf, 0.8 * tf], [-.035 * 1000, .035 * 1000], 'k-.')
ax[1,k].grid()
ax[1,k].set_xlim([0, tf])
ax[1,k].set_ylim([-.035 * 1000, .035 * 1000])
ax[1,k].set_xlabel('Time (s)')
ax[1,k].xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax[0,k].tick_params(axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
ax[0,k].legend(loc='lower right')
plt.tight_layout()
plt.subplots_adjust(hspace=0.10, wspace=0.1)
k += 1
# remove the yticks for the second plot
for j in range(0,2):
ax[j,1].tick_params(axis='y',
which='both',
left=False,
# top=False, # ticks along the top edge are off
labelleft=False)
ax[j,0].set_xticks(np.arange(0, 0.3 + 0.1, step=0.1))
ax[j,1].set_xticks(np.arange(0, 2.0 + 0.5, step=0.5))
# Time optimal trajectory evaluation using Impulse information
with skip_run('skip', 'optimize_Tf_and_Vel_using_impulse') as check, check():
Data = collections.defaultdict()
counter = 0
exps = ['optimal_tf'] #['weights_1', 'weights_2', 'weights_3']
# weights for multi-objective optimization
weights = [[0.5,0.5]] #[[0.9, 0.1], [0.5, 0.5], [0.1, 0.9]]
for weight in weights:
for setting in config['stiff_config']:
# if (setting == 'high_stiffness'):
# pyo_model= robot_linear_dynamics_optimize_tf(weight, config)
# else:
pyo_model = maximize_vel_minimize_time_impulse(setting, weight, config)
# Solve the optimization problem and return the optimal results
solved_model = pyomo_solver(pyo_model, 'ipopt', config, neos=False)
data, _ , tf = extract_results_for_optimal_tf(solved_model, setting, config)
print("Tf value for " + setting + " is: " + str(tf))
plot_disp_vel(data, 'Both_magnets_moving', config)
plt.suptitle('Tf: ' + str(tf) + ', Stiffness: ' + setting)
# save the csv files
filepath = str(Path(__file__).parents[1] / config['csv_path']/ exps[counter] / setting) + '.csv'
data.to_csv(filepath, index=False)
Data[setting] = data.to_dict()
counter += 1
plot_settings_together(Data, config)
# save the optimal data
# filepath = str(Path(__file__).parents[1] / config['two_moving_mags_optim_tf_data'])
# dd.io.save(filepath, Data)
with skip_run('skip', 'plot_the_optimal_values_LS_VS') as check, check():
exps = ['optimal_tf'] #['weights_1', 'weights_2', 'weights_3']
counter = 0
plot_magnet_disp = False
for i in exps:
if plot_magnet_disp:
fig, ax = plt.subplots(3,1)
else:
fig, ax = plt.subplots(2,1)
# plt.tight_layout()
# fig.subplots_adjust(hspace=0.10, wspace=2.25)
filepath1 = str(Path(__file__).parents[1] / config['csv_path'] / i / 'low_stiffness') + '.csv'
data1 = pd.read_csv(filepath1, delimiter=',')
filepath2 = str(Path(__file__).parents[1] / config['csv_path'] / i / 'variable_stiffness') + '.csv'
data2 = | pd.read_csv(filepath2, delimiter=',') | pandas.read_csv |
"""Accessors to Pandas DataFrame interpreting metadata in the column index.
Two versions: 'cdl' assumes labeled data, 'ums' assumes unlabeled data."""
from collections import namedtuple
import numpy as np
import pandas as pd
from pandas_flavor import register_dataframe_accessor
from .utils import _is_string
def create_multiindex_with_labels(df, labels=["no label"], level_name="label"):
cols = df.columns
n = len(cols)
metanames = cols.names
if not labels:
labels = ["no label"]
elif _is_string(labels):
labels = [labels]
else:
labels = list(labels)
nr = n // len(labels)
newstrs = []
for s in labels:
newstrs.extend([s] * nr)
if len(metanames) > 1:
tcols = [list(c) for c in cols.to_flat_index()]
else:
tcols = [[c] for c in cols]
newcols = [tuple([ns] + c) for (ns, c) in zip(newstrs, tcols)]
return pd.MultiIndex.from_tuples(newcols, names=[level_name] + metanames)
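# Illustrative sketch (added comment; the column and label names below are hypothetical):
# given df.columns == Index(['S1', 'S2', 'S3', 'S4'], name='sample') and
# labels == ['ctrl', 'treat'], each label is repeated len(columns) // len(labels)
# times, so the function returns
#   MultiIndex([('ctrl', 'S1'), ('ctrl', 'S2'), ('treat', 'S3'), ('treat', 'S4')],
#              names=['label', 'sample'])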
@register_dataframe_accessor("cdl")
class CDLAccessor(object):
"""An accessor to Pandas DataFrame to interpret content as column organized, labeled data.
This interpretation assumes that the **column** index stores the essential
metadata, namely, sample names and group labels. This index is
    usually hierarchical and other levels are optional. The 'ums' accessor, for unlabeled
    data where level 0 is assumed to hold sample names, is also available in this module.
    Interpretation is based on the following conventions:
    - For the accessor to work, the column index must have at least two levels.
- Level 1 is interpreted as sample names by the accessor.
Default name for this level is 'sample' and default values are 'Sample {i}'.
.samples is a property to access this level.
- Level 0 is interpreted as labels. .labels is a property to access labels.
- More levels are possible, if they are read from data sources or added by Pandas index manipulation.
The (row) index is interpreted as "features", often labels of spectral entities. Examples are
    m/z values, formulae or any format-specific labeling scheme. It may be hierarchical.
"""
def __init__(self, df):
self._validate(df)
self._df = df
@staticmethod
def _validate(df):
"""Require a pandas DataFrame with at least two levels in column MultiIndex to work."""
if not isinstance(df, pd.DataFrame):
raise AttributeError("'cdl' must be used with a Pandas DataFrame")
if len(df.columns.names) < 2:
raise AttributeError(
"Must have at least label and sample metadata on columns"
)
def _get_zip_labels_samples(self):
self._df.columns = self._df.columns.remove_unused_levels()
return zip(
self._df.columns.get_level_values(0), self._df.columns.get_level_values(1)
)
@property
def unique_labels(self):
"""Get the different data labels (with no repetitions)."""
return tuple(pd.unique(self.labels))
@property
def labels(self):
"""iterate over labels of each DataFrame column."""
self._df.columns = self._df.columns.remove_unused_levels()
return self._df.columns.get_level_values(0)
@labels.setter
def labels(self, value):
"""Setter for data labels."""
self._rebuild_col_level(value, 0)
@property
def label_count(self):
"""Get the number of labels."""
# 'no label' still counts as one (global) label
return len(self.unique_labels)
@property
def unique_samples(self):
"""Get the different sample names (with no repetitions in case the number of levels > 2)."""
return tuple(pd.unique(self.samples))
@property
def samples(self):
"""iterate over sample names of each DataFrame column."""
self._df.columns = self._df.columns.remove_unused_levels()
return self._df.columns.get_level_values(1)
@samples.setter
def samples(self, value):
"""Setter for sample names."""
self._rebuild_col_level(value, 1)
@property
def sample_count(self):
"""Get the number of samples."""
return len(self.unique_samples)
def _rebuild_col_level(self, value, level):
cols = self._df.columns.remove_unused_levels()
n = len(cols)
metanames = cols.names
# handle value
if value is None or len(value) == 0:
if level == 0:
value = ["no label"]
elif level == 1:
value = [f"Sample {i}" for i in range(1, n + 1)]
else:
value = [f"Info {i}" for i in range(1, n + 1)]
elif _is_string(value):
value = [value]
else:
value = list(value)
nr = n // len(value)
newstrs = []
for s in value:
newstrs.extend([s] * nr)
cols = [list(c) for c in cols]
for i, s in enumerate(newstrs):
cols[i][level] = s
newcols = [tuple(c) for c in cols]
self._df.columns = pd.MultiIndex.from_tuples(newcols, names=metanames)
@property
def feature_count(self):
"""Get the number of features."""
return len(self._df.index)
@property
def iter_labels_samples(self):
"""iterate over pairs of (label, sample name) for each DataFrame column."""
self._df.columns = self._df.columns.remove_unused_levels()
return self._get_zip_labels_samples()
@property
def no_labels(self):
"""True if there is only one (global) label 'no label'."""
return self.label_count == 1 and self.labels[0] == "no label"
def info(self, all_data=False):
"""A dicionary of global counts or a DataFrame with info for each sample"""
if all_data:
return dict(
samples=self.sample_count,
labels=self.label_count,
features=self.feature_count,
)
ls_table = [(s, l) for (l, s) in self._get_zip_labels_samples()]
ls_table.append((self.sample_count, self.label_count))
indx_strs = [str(i) for i in range(self.sample_count)] + ["global"]
return pd.DataFrame(ls_table, columns=["sample", "label"], index=indx_strs)
def label_of(self, sample):
"""Get label from sample name"""
for lbl, s in self._get_zip_labels_samples():
if s == sample:
return lbl
raise KeyError(f"No label found for '{sample}'")
def samples_of(self, label):
"""Get a list of sample names from label"""
snames = [s for lbl, s in self._get_zip_labels_samples() if lbl == label]
return snames
def _get_subset_data_indexer(self, sample=None, label=None, no_drop_na=False):
if sample is None and label is None:
return list(self._df.columns)
if sample is not None:
if _is_string(sample):
samples = [sample]
else:
samples = list(sample)
indexer = []
for s in samples:
if s not in self.samples:
raise KeyError(f"'{s}' is not a sample name")
lbl = self.label_of(s)
indexer.append((lbl, s))
if len(indexer) == 1:
indexer = indexer[0]
return indexer
elif sample is None and label is not None:
if _is_string(label):
labels = [label]
else:
labels = list(label)
indexer = []
for s in labels:
if s not in self.labels:
raise KeyError(f"'{s}' is not a label")
indexer.append(s)
#indexer = (indexer,)
return indexer
else:
raise KeyError("Sample name or label not found")
def _get_subset_data(self, sample=None, label=None, no_drop_na=False):
if sample is None and label is None:
df = self._df
else:
col_indexer = self.subset_iloc(sample=sample, label=label)
df = self._df.iloc[:, col_indexer]
# col_indexer = self._get_subset_data_indexer(sample=sample, label=label)
# df = self._df.loc[:, col_indexer]
df = df.copy() if no_drop_na else df.dropna(how="all")
if isinstance(df, pd.DataFrame):
df.columns = df.columns.remove_unused_levels()
return df
def take(self, **kwargs):
"""Retrieves subset of data by sample name or label."""
return self._get_subset_data(**kwargs)
def subset(self, **kwargs):
"""Alias for take()."""
return self.take(**kwargs)
def features(self, **kwargs):
"""Get the row index (features) indexing data by sample name or label"""
df = self._get_subset_data(**kwargs)
return df.index
def subset_where(self, sample=None, label=None):
"""return a boolean DataFrame with the location of subset."""
df = pd.DataFrame(False, index=self._df.index, columns=self._df.columns)
col_indexer = self._get_subset_data_indexer(sample=sample, label=label)
df.loc[:, col_indexer] = True
return df
def subset_loc(self, sample=None, label=None):
"""return an indexing list col_indexer to be used with .loc[:, col_indexer] for a subset."""
col_indexer = self._get_subset_data_indexer(sample=sample, label=label)
return col_indexer
def subset_iloc(self, sample=None, label=None):
"""return an indexing list col_indexer to be used with .iloc[:, col_indexer] for a subset."""
if sample is None and label is None:
return list(range(len(self._df.columns)))
self._df.columns = self._df.columns.remove_unused_levels()
if sample is not None:
if _is_string(sample):
samples = [sample]
else:
samples = list(sample)
for s in samples:
if s not in self.samples:
raise KeyError(f"'{s}' is not a sample name")
indexer = []
list_samples = list(self.samples)
for i, s in enumerate(list_samples):
if s in samples:
indexer.append(i)
if len(indexer) == 1:
indexer = indexer[0]
return indexer
elif sample is None and label is not None:
if _is_string(label):
labels = [label]
else:
labels = list(label)
for s in labels:
if s not in self.labels:
raise KeyError(f"'{s}' is not a label")
indexer = []
list_labels = list(self.labels)
for i, lbl in enumerate(list_labels):
if lbl in labels:
indexer.append(i)
if len(indexer) == 1:
indexer = indexer[0]
return indexer
else:
raise KeyError("Sample name or label not found")
return indexer
def pipe(self, func, drop_na=True, **kwargs):
"""Thin wrapper around DataFrame.pipe() with automatic dropna and housekeeping."""
df = self._df
df = df.pipe(func, **kwargs)
if drop_na:
df = df.dropna(how="all")
if isinstance(df, pd.DataFrame):
df.columns = df.columns.remove_unused_levels()
return df
def erase_labels(self):
"""Erase the labels level (level 0) in the column MultiIndex.
        CAUTION: after dropping the level, the 'cdl' accessor will no longer work
        (or will misinterpret the remaining levels); use the 'ums' accessor instead."""
new_cols = self._df.columns.droplevel(level=0)
        return pd.DataFrame(self._df.values, index=self._df.index, columns=new_cols)
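# --- Added usage sketch (illustrative only; this helper is not part of the original
# --- API, and the labels/sample names below are made up). ---
def _cdl_usage_example():
    """Minimal sketch of how the 'cdl' accessor is meant to be used."""
    cols = pd.MultiIndex.from_tuples(
        [("ctrl", "S1"), ("ctrl", "S2"), ("treat", "S3"), ("treat", "S4")],
        names=["label", "sample"],
    )
    data = pd.DataFrame([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], columns=cols)
    # Labels and sample names are read from the column index levels.
    assert data.cdl.unique_labels == ("ctrl", "treat")
    assert data.cdl.sample_count == 4
    # Subsetting by label keeps only the matching columns.
    return data.cdl.take(label="ctrl")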
import argparse
import sys
import os.path as osp
import os
sys.path.insert(1, osp.abspath(osp.join(os.getcwd(), *('..',) * 2)))
from dataset_preprocess import CoraDataset, PlanetoidDataset
from attack.models import *
import torch
import pandas as pd
from torch_geometric.utils.loop import add_self_loops
import utils
import numpy as np
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def index_to_mask(index, size):
mask = torch.zeros(size, dtype=torch.bool, device=index.device)
mask[index] = 1
return mask
def split_dataset(dataset, new_nodes, train_percent=0.7):
torch.manual_seed(0)
indices = []
_size = dataset.data.num_nodes - new_nodes
y = dataset.data.y[:_size]
for i in range(dataset.num_classes):
index = (y == i).nonzero().view(-1)
index = index[torch.randperm(index.size(0))]
indices.append(index)
train_index = torch.cat([i[:int(len(i) * train_percent)] for i in indices], dim=0)
rest_index = torch.cat([i[int(len(i) * train_percent):] for i in indices], dim=0)
rest_index = rest_index[torch.randperm(rest_index.size(0))]
dataset.data.train_mask = index_to_mask(train_index, size=dataset.data.num_nodes)
dataset.data.val_mask = index_to_mask(rest_index[:len(rest_index) // 2], size=dataset.data.num_nodes)
dataset.data.test_mask = index_to_mask(rest_index[len(rest_index) // 2:], size=dataset.data.num_nodes)
dataset.train_index = train_index[:]
dataset.val_index = rest_index[:len(rest_index) // 2]
dataset.test_index = rest_index[len(rest_index) // 2:]
dataset.data, dataset.slices = dataset.collate([dataset.data])
return dataset
def build_args():
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, default='cora', help='name of the dataset')
# dataset_name = ['cora', 'citeseer', 'pubmed']
parser.add_argument('--attack_graph', type=str2bool, default=True, help='global attack')
    parser.add_argument('--node_idx', type=int, default=None, help='target node idx (None means no target)')
parser.add_argument('--added_node_num', type=int, default=20, help='num of new nodes')
parser.add_argument('--train_percent', type=float, default=0.7, help='train percent')
parser.add_argument('--sparsity', type=float, default=0.5, help='sparsity')
parser.add_argument('--random', type=bool, default=False, help='random mask')
parser.add_argument('--edge_size', type=float, default=0.005, help='edge_size')
parser.add_argument('--edge_ent', type=float, default=1.0, help='edge_ent')
parser.add_argument('--feat_sparsity', type=float, default=0.5, help='feat_sparsity')
parser.add_argument('--train_epochs', type=int, default=300, help='epochs for training a GNN model')
parser.add_argument('--attack_epochs', type=int, default=600, help='epochs for attacking a GNN model')
parser.add_argument('--retrain_epochs', type=int, default=10,
help='epochs for retraining a GNN model with new graph')
parser.add_argument('--seed', type=int, default=42, help='seed')
args = parser.parse_args()
return args
def fix_random_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # multi gpu
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
np.random.seed(seed)
if __name__ == '__main__':
args = build_args()
fix_random_seed(seed=args.seed)
ADD_ZERO = 0
# step 1: train baseline
data_name = args.dataset_name
if data_name in ["cora", 'photo']:
baseline = CoraDataset('./datasets', data_name, added_new_nodes=ADD_ZERO)
else:
        # for the pubmed and citeseer datasets
baseline = PlanetoidDataset('./datasets', data_name, added_new_nodes=ADD_ZERO)
print(" load data finished")
split_dataset_name = "baseline_"+data_name+"_split"
split_path = osp.join('./datasets', split_dataset_name, 'train_percent', str(args.train_percent), 'added_node', str(ADD_ZERO))
if not osp.isdir(split_path):
dataset = split_dataset(baseline, ADD_ZERO, train_percent=args.train_percent)
os.makedirs(split_path)
torch.save(baseline, osp.join(split_path, 'split_data.pt'))
else:
baseline = torch.load(osp.join(split_path, 'split_data.pt'))
edge_index_with_loop, _ = add_self_loops(baseline.data.edge_index, num_nodes=baseline.data.num_nodes)
baseline.data.edge_index = edge_index_with_loop
dim_node = baseline.num_node_features
dim_edge = baseline.num_edge_features
num_classes = baseline.num_classes
baseline_model_ckpt_fold = osp.join('checkpoints', data_name, str(args.train_percent),'GCN_2l', 'seed', str(args.seed))
if not osp.isdir(baseline_model_ckpt_fold):
os.makedirs(baseline_model_ckpt_fold)
baseline_model_ckpt_path = osp.join(baseline_model_ckpt_fold,'GCN_2l_best.ckpt')
model = GCN_2l(model_level='node', dim_node=dim_node, dim_hidden=16, num_classes=num_classes)
model.to(device)
if osp.isfile(baseline_model_ckpt_path):
print(" loding from file")
model.load_state_dict(torch.load(baseline_model_ckpt_path)['state_dict'])
else:
baseline.data = baseline.data.to(device)
utils.train(model, baseline.data, baseline_model_ckpt_path, lr=0.005, epochs=args.train_epochs, verbose=False)
    # check baseline output of specific node for target attack
if not args.attack_graph:
print(" generate target attack test node list")
model.eval()
corrected_test_node_id_list = []
cnt = 0
with torch.no_grad():
# print("baseline test index", baseline.test_index)
output = model(baseline.data.x, baseline.data.edge_index, None)
cols = ["test_ID", "predict_class"]
success = None
# y is from [0-6]
tmp_list = []
for id in baseline.test_index:
# print("test id is ", id.item())
node_idx = id.item()
predict_class = torch.argmax(output[node_idx], dim=0).item()
origin = baseline.data.y[node_idx]
if origin == predict_class:
corrected_test_node_id_list.append(node_idx)
cnt += 1
# tmp_list.append([node_idx, predict_class, str(output[node_idx].detach().numpy())])
tmp_list.append([node_idx, predict_class])
# print("tmp list", tmp_list)
print("acc = ", cnt/baseline.test_index.size(0))
path = osp.join('./results/target_attack', data_name)
if not osp.isdir(path):
os.makedirs(path)
file = f'{path}/train_percent_{args.train_percent}_corrected_test_ID_res.csv'
print(" test id list file saved at", file)
df = pd.DataFrame(tmp_list,columns=cols)
if not os.path.isfile(file):
df.to_csv(file, index=False)
vis_file = f'{path}/train_percent_{args.train_percent}_baseline_A_X_res.pkl'
print(" baseline visualization file saved at ", vis_file)
utils.save_to_file(
[baseline.data.edge_index.to('cpu'), torch.argmax(output.to('cpu'), dim=1), baseline.data.x[:]], vis_file)
exit(-1)
# args.node_idx = baseline.test_index[0].item()
# print(" node idx ", args.node_idx)
# print(" output shape ", output.shape, output[args.node_idx], type(output[args.node_idx]))
# pred_class = torch.argmax(output[args.node_idx], dim=0).item()
# print(" pred class = ", pred_class)
# origin = baseline.data.y[args.node_idx]
# print(" origin", origin)
# utils.save_to_file([baseline.data.edge_index.to('cpu'), torch.argmax(output.to('cpu'), dim=1), baseline.data.x[:]], 'baseline'+str(args.node_idx)+'dataset_preprocess.pkl')
# exit(-1)
test_acc, test_loss, macro_precision, macro_recall, macro_f1_score, weighted_precision, weighted_recall, weighted_f1_score = utils.evaluate(
model, baseline.data, baseline.data.test_mask)
print(" test acc", test_acc)
columns = ['seed', 'accuracy', 'macro_precision', 'macro_recall', 'macro_f1_score', 'weighted_precision',
'weighted_recall', 'weighted_f1_score']
# path = f'results/baseline'
path = osp.join('./results/baseline', data_name)
if not osp.isdir(path):
os.makedirs(path)
file = f'{path}/train_percent_{args.train_percent}_res.csv'
print(" res save as ", file)
# file = f'{path}/.csv'
res_list = [args.seed,
test_acc,
macro_precision,
macro_recall,
macro_f1_score,
weighted_precision,
weighted_recall,
weighted_f1_score]
df = pd.DataFrame([res_list],columns=columns)
if not os.path.isfile(file):
df.to_csv(file, index=False)
else:
        prev_res = pd.read_csv(file)
"""
Test the converters.
Currently, there is only the Kojak converter.
"""
import os
import pytest
import numpy as np
import pandas as pd
import xenith
from xenith.convert.kojak import _count_proteins
from xenith.convert.kojak import _all_decoy
from xenith.convert.kojak import _read_percolator
from xenith.convert.kojak import _read_kojak
from xenith.convert.kojak import _write_pin
@pytest.fixture
def kojak_files():
"""Locations of test Kojak files"""
kojak = os.path.join("tests", "data", "test.kojak.txt")
intra = os.path.join("tests", "data", "test.perc.intra.txt")
inter = os.path.join("tests", "data", "test.perc.inter.txt")
return (kojak, inter, intra)
def test_convert_kojak(tmpdir, kojak_files):
"""
Test the conversion of Kojak results to xenith tab-delimited format.
"""
out_file = os.path.join(tmpdir, "test.txt")
xenith.convert.kojak(kojak_files[0], kojak_files[1], kojak_files[2],
out_file=out_file)
# Because there are only 2 proteins in these results, 'intraprotein'
# should always be 0.
dataset = xenith.load_psms(out_file)
intraprotein = dataset.features.intraprotein.tolist()
assert all(not x for x in intraprotein)
# also verify to_pin doesn't error:
xenith.convert.kojak(kojak_files[0], kojak_files[1], kojak_files[2],
out_file=out_file, to_pin=True)
def test_count_proteins():
"""Test that count proteins works as expected."""
proteins = pd.DataFrame({"ProteinA": ["a", "b", "c", "a;b"],
"ProteinB": ["c", "a;c", "b", "b;c;a"]})
num_prot = _count_proteins(proteins)
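    # The two columns contain only proteins "a", "b" and "c" once the
    # ';'-separated entries are split, hence three unique proteins.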
assert num_prot == 3
def test_all_decoy():
"""Test that the _all_decoy function works."""
proteins = pd.Series(["a_b", "d_c", "a_b;d_c", "d_a;d_b", "d_c;a_a"])
    answer = pd.Series([0, 1, 0, 1, 0])
import cPickle as pickle
import logging
import multiprocessing as mp
import os
import subprocess
import timeit
import pandas as pd
import parallel as par
import atom3.database as db
import atom3.sequence as sequ
def add_conservation_parser(subparsers, pp):
"""Add parser."""
def map_all_pssms_main(args):
map_all_pssms(args.pkl_dataset, args.blastdb, args.output_dir, args.c)
ap = subparsers.add_parser(
'conservation', description='sequence conservation',
help='compute sequence conservation features',
parents=[pp])
ap.set_defaults(func=map_all_pssms_main)
ap.add_argument('pkl_dataset', metavar='pkl', type=str,
help='parsed dataset')
ap.add_argument('blastdb', metavar='bdb', type=str,
help='blast database to do lookups on')
ap.add_argument('output_dir', metavar='output', type=str,
help='directory to output to')
ap.add_argument('-c', metavar='cpus', default=mp.cpu_count(), type=int,
help='number of cpus to use for processing (default:'
' number processors available on current machine)')
def gen_pssm(pdb_filename, blastdb, output_filename):
"""Generate PSSM and PSFM from sequence."""
pdb_name = db.get_pdb_name(pdb_filename)
out_dir = os.path.dirname(output_filename)
work_dir = os.path.join(out_dir, 'work')
if not os.path.exists(work_dir):
os.makedirs(work_dir)
fasta_format = work_dir + "/{:}.fa"
id_format = work_dir + "/{:}.cpkl"
chains, chain_fasta_filenames, id_filenames = sequ.pdb_to_fasta(
pdb_filename, fasta_format, id_format, True)
pssms = []
for chain, chain_fasta_filename, id_filename in \
zip(chains, chain_fasta_filenames, id_filenames):
basename = os.path.splitext(chain_fasta_filename)[0]
pssm_filename = "{}.pssm".format(basename)
blast_filename = "{}.blast".format(basename)
clustal_filename = "{}.clustal".format(basename)
al2co_filename = "{}.al2co".format(basename)
if not os.path.exists(pssm_filename):
logging.info("Blasting {:}".format(chain_fasta_filename))
_blast(chain_fasta_filename, pssm_filename, blast_filename,
blastdb)
if not os.path.exists(pssm_filename):
logging.warning("No hits for {:}".format(chain_fasta_filename))
# Create empty file.
open(pssm_filename, 'w').close()
if not os.path.exists(clustal_filename):
logging.info("Converting {:}".format(blast_filename))
_to_clustal(blast_filename, clustal_filename)
if not os.path.exists(al2co_filename):
logging.info("Al2co {:}".format(al2co_filename))
_al2co(clustal_filename, al2co_filename)
if os.stat(pssm_filename).st_size != 0:
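            # PSI-BLAST ASCII PSSMs start with a short header and end with a
            # statistics footer (skipped via skiprows/skipfooter); each data row
            # holds 20 log-odds score columns followed by 20 weighted-percentage
            # columns (converted to frequencies below).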
pssm = pd.read_csv(
pssm_filename, skiprows=2, skipfooter=6, delim_whitespace=True,
engine='python', usecols=range(20), index_col=[0, 1])
pssm = pssm.reset_index()
del pssm['level_0']
pssm.rename(columns={'level_1': 'orig'}, inplace=True)
pscm = pd.read_csv(
pssm_filename, skiprows=2, skipfooter=6, delim_whitespace=True,
engine='python', usecols=range(20, 40), index_col=[0, 1])
psfm = pscm.applymap(lambda x: x / 100.)
psfm = psfm.reset_index()
del psfm['level_0']
psfm.columns = pssm.columns
del psfm['orig']
del pssm['orig']
# Combine both into one.
psfm = psfm.add_prefix('psfm_')
pssm = pssm.add_prefix('pssm_')
al2co = pd.read_csv(
al2co_filename, delim_whitespace=True, usecols=[2],
names=['al2co'])
pssm = pd.concat([pssm, psfm, al2co], axis=1)
else:
logging.warning("No pssm found for {:} (model {:}, chain {:})"
.format(pdb_name, chain[-2], chain[-1]))
            continue  # no PSSM for this chain; skip it rather than indexing into None below
pdb_name = db.get_pdb_name(pdb_filename)
key = pdb_name + '-' + chain[-2] + '-' + chain[-1]
pos_to_res = pickle.load(open(id_filename))[key]
pssm['pdb_name'] = db.get_pdb_name(pdb_filename)
pssm['model'] = chain[0]
pssm['chain'] = chain[1]
pssm['residue'] = pos_to_res
pssms.append(pssm)
pssms = pd.concat(pssms)
return pssms
def map_pssms(pdb_filename, blastdb, output_filename):
pdb_name = db.get_pdb_name(pdb_filename)
start_time = timeit.default_timer()
start_time_blasting = timeit.default_timer()
pis = gen_pssm(pdb_filename, blastdb, output_filename)
num_chains = len(pis.groupby(['pdb_name', 'model', 'chain']))
elapsed_blasting = timeit.default_timer() - start_time_blasting
    parsed = pd.read_pickle(pdb_filename)
import pathlib
import numpy as np
import pandas as pd
from ..designMethods.en_13001_3_3 import ENComputation, LoadCollectivePrediction, MARSInput
from .output import ResultWriter
from ..designMethods.en_13001_3_3.input_error_check import InputFileError
class MainApplication():
def __init__(self) -> None:
self.input = MARSInput()
self.prediction = LoadCollectivePrediction()
self.computation: ENComputation
self.input_file_path: pathlib.Path
self.output_file_path: pathlib.Path
self.result_writer: ResultWriter
self.sc_direction: int
self.config: str
def read_input_file(self, filename: pathlib.Path) -> None:
# load data for load collective prediction
self.input.clear_inputs()
try:
self.input.read_input_df(filename)
self.input.check_input_df()
if len(self.input.input_df.columns) == 1:
raise InputFileError("More than 3 empty cells in all configurations")
self.input.load_gp_input("Stacker Crane (SC) And Rack Configuration")
# load en 13001 parameters
self.input.load_parameter_input("EN-13001-3-3")
# load materials
self.input.materials.read(filename, "rail_materials", "wheel_materials")
# load geometries
self.input.geometries.read(filename, "rail_geometries", "wheel_geometries")
# check materials and geometries
self.input.geometry_and_material_error_check()
# # load materials
# self.input.load_material_input_check(filename, "rail_materials", "wheel_materials")
# # load rail and wheel geometries
# self.input.load_geometry_input_check(filename, "rail_geometries", "wheel_geometries")
# check for input errors and drop faulty configurations
self.input.perform_error_checks()
self.input.drop_error_configs()
if len(self.input.parameters.gen_params) == 0:
raise InputFileError("At least one error in all configurations")
except InputFileError as e:
raise e
except ValueError as e:
if "Worksheet" in str(e):
raise InputFileError("Broken input file: one or more required input sheets were missing. required sheets are: Input_variables, rail_materials, wheel_materials, wheel_geometries, rail_geometries") from e
raise InputFileError("Unknown fatal error with input file, please redownload") from e
except Exception as e:
if "sheet" in str(e):
raise InputFileError("Broken input file: one or more required input sheets were missing. required sheets are: Input_variables, rail_materials, wheel_materials, wheel_geometries, rail_geometries") from e
raise InputFileError("Unknown fatal error with input file, please redownload") from e
def prepare_gp_input(self):
self.input.recompute_gp_data(self.config)
# check gp input variables for values outside expected intervals
self.input.perform_gp_input_warning_check()
def run_computation(self) -> None:
self.input.clear_computed_inputs()
self.input.set_materials_and_geometry()
self.input.parameters.compute_f_f3()
self.input.parameters.compute_contact_and_f_1()
self.prediction.clear_prediction_results()
# assign f_sd_s
self.prediction.load_f_sd_s(self.input.parameters.gen_params["F_sd_s_w"], self.input.parameters.gen_params["F_sd_s_r"])
self.prediction.predict_kc(self.input.gp_input.norm)
self.prediction.compute_F_sd_f_all(self.input.gp_input.raw, self.config, self.sc_direction)
self.prediction.recompute_kc(self.input.parameters.gen_params["F_sd_f_w"], "wf")
self.prediction.recompute_kc(self.input.parameters.gen_params["F_sd_f_w"], "wr")
self.prediction.recompute_kc(self.input.parameters.gen_params["F_sd_f_r"], "r")
self.prediction.predict_travelled_dist(self.input.gp_input.raw["cycle_mode"], self.input.gp_input.raw["num_cycles_wheel"], self.input.gp_input.raw["r_l"])
# create computation instance and compute configs
self.computation = ENComputation()
self.computation.load_data(self.input, self.prediction)
self.computation.compute_pre_F_rd_all()
self.computation.compute_F_rd_all()
self.computation.compute_proofs_all()
def initialize_result_writer(self):
# pick a filename that doesn't exist yet
self.result_writer = ResultWriter(self.computation, self.input, self.output_file_path)
self.result_writer.create_summary()
def computation_mode_1(self) -> None:
self.prepare_gp_input()
self.run_computation()
        # results output
self.input.prepare_for_output()
self.computation.load_results_all()
self.initialize_result_writer()
self.result_writer.write()
# create_output_file(self.computation, self.input, self.output_file_path)
def computation_mode_2(self) -> None:
self.prepare_gp_input()
# sort wheel geometries by diameter
self.input.geometries.wheel.sort_values("D", inplace=True)
wheel_geometries = list(self.input.geometries.wheel.index)
proof_results = np.empty((len(wheel_geometries), len(self.input.parameters.gen_params)))
for idx, wheel_geometry in enumerate(wheel_geometries):
self.input.parameters.gen_params.loc[:, "wheel_geometry"] = wheel_geometry
self.run_computation()
            # check if all proofs are fulfilled
proof_results[idx, :] = np.logical_and.reduce((
self.computation.wheel_f.proofs["static"], self.computation.wheel_f.proofs["fatigue"].loc[:, "preds"],
self.computation.wheel_r.proofs["static"], self.computation.wheel_r.proofs["fatigue"].loc[:, "preds"],
self.computation.rail.proofs["static"], self.computation.rail.proofs["fatigue"].loc[:, "preds"]
))
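        # wheel geometries are sorted by diameter, so argmax over each boolean
        # column picks the first (smallest) geometry whose proofs all pass;
        # configurations with no passing geometry are handled just below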
wheel_geometries_min_d = pd.Series(wheel_geometries)[proof_results.argmax(axis=0)]
wheel_geometries_min_d = pd.DataFrame(wheel_geometries_min_d, columns=["Min. Wheel Geometry"])
wheel_geometries_min_d.index = range(len(wheel_geometries_min_d))
wheel_geometries_min_d[proof_results.sum(axis=0) < 1] = wheel_geometries[-1]
self.input.parameters.gen_params.loc[:, "wheel_geometry"] = list(wheel_geometries_min_d.to_numpy())
self.run_computation()
wheel_geometries_min_d[proof_results.sum(axis=0) < 1] = "NaN"
# drop wheel geometry from output params
# self.input.parameters.gen_params_out.drop(columns="wheel_geometry", inplace=True)
# reults output
self.input.prepare_for_output()
self.computation.load_results_all()
# add wheel geometry to output
self.computation.wheel_f.results["static"] = pd.concat(
[wheel_geometries_min_d, self.computation.wheel_f.results["static"]], axis=1
)
self.computation.wheel_r.results["static"] = pd.concat(
[wheel_geometries_min_d, self.computation.wheel_r.results["static"]], axis=1
)
self.initialize_result_writer()
# add wheel geometry to summary
self.result_writer.summary["wheel_f"] = pd.concat([wheel_geometries_min_d, self.result_writer.summary["wheel_f"].T], axis=1)
self.result_writer.summary["wheel_r"] = | pd.concat([wheel_geometries_min_d, self.result_writer.summary["wheel_r"].T], axis=1) | pandas.concat |
#!/usr/bin/env python
# encoding:utf-8
"""
Author : <NAME>
Date : 2021/8/4
Time: 20:06
File: precision_table_plot.py
HomePage : http://github.com/yuanqingmei
Email : <EMAIL>
compute the avg, std, max and min values and draw the box plots of precision and recall.
"""
import time
def precision_table_plot(working_dir="F:\\NJU\\MTmeta\\experiments\\pooled\\",
plot_dir="F:\\NJU\\MTmeta\\experiments\\pooled\\plots\\"):
import os
import csv
import pandas as pd
import matplotlib.pyplot as plt
    pd.set_option('display.max_columns', None)
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
locale.Error): # horrible name for a Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
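        # map the integer codes back onto the level's values (missing codes,
        # i.e. -1, become the level's NA value) so levels can be compared
        # element-wise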
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
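# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# A minimal, hedged example of how assert_index_equal above can be exercised.
# `_demo_assert_index_equal_usage` is a hypothetical helper added purely for
# illustration and is never called anywhere in this module.
def _demo_assert_index_equal_usage():
    import pandas as pd
    # exact='equiv' (the default) lets a RangeIndex stand in for an Int64Index
    # holding the same values, so this comparison passes.
    assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]), exact='equiv')
    # exact=True insists on identical classes, so the same comparison raises.
    try:
        assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]), exact=True)
    except AssertionError:
        pass  # expected: 'Index classes are different'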
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
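# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Hedged example of the check_category_order switch documented above.
# `_demo_assert_categorical_usage` is a hypothetical helper for illustration only.
def _demo_assert_categorical_usage():
    import pandas as pd
    left = pd.Categorical(['a', 'b', 'a'], categories=['a', 'b'])
    right = pd.Categorical(['a', 'b', 'a'], categories=['b', 'a'])
    # Same values, but the category lists are ordered differently:
    # the strict code-level comparison fails ...
    try:
        assert_categorical_equal(left, right)  # check_category_order=True
    except AssertionError:
        pass  # expected: categories / codes differ
    # ... while the value-level comparison passes.
    assert_categorical_equal(left, right, check_category_order=False)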
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
obj='{obj}.right'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if PY2 and isinstance(left, string_types):
# left needs to be printable in native text type in python2
left = left.encode('utf-8')
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
if PY2 and isinstance(right, string_types):
# right needs to be printable in native text type in python2
right = right.encode('utf-8')
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
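# --- Illustrative note (editorial addition, not part of the original module) ---
# Sketch of what raise_assert_detail produces; the call below is hypothetical
# and the rendered message follows the template string above:
#
#   raise_assert_detail('Index', 'Index values are different (50.0 %)', '[1, 2]', '[1, 3]')
#
#   AssertionError: Index are different
#   Index values are different (50.0 %)
#   [left]: [1, 2]
#   [right]: [1, 3]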
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
check_same=None, obj='numpy array'):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
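# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Hedged example of assert_numpy_array_equal; `_demo_assert_numpy_usage` is a
# hypothetical helper for illustration only and is never called.
def _demo_assert_numpy_usage():
    import numpy as np
    arr = np.array([1, 2, 3])
    assert_numpy_array_equal(arr, arr.copy())               # equal values and dtype
    assert_numpy_array_equal(arr, arr, check_same='same')   # must share memory
    try:
        assert_numpy_array_equal(arr, arr.astype(float))
    except AssertionError:
        pass  # expected: dtype attribute differs while check_dtype=True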
def assert_extension_array_equal(left, right, check_dtype=True,
check_less_precise=False,
check_exact=False):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_exact : bool, default False
Whether to compare number exactly.
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray), 'left is not an ExtensionArray'
assert isinstance(right, ExtensionArray), 'right is not an ExtensionArray'
if check_dtype:
assert_attr_equal('dtype', left, right, obj='ExtensionArray')
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(left_na, right_na, obj='ExtensionArray NA mask')
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(left_valid, right_valid, obj='ExtensionArray')
else:
_testing.assert_almost_equal(left_valid, right_valid,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
obj='ExtensionArray')
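# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Hedged example of assert_extension_array_equal using the nullable integer
# extension type; assumes a pandas version where pd.array(..., dtype="Int64")
# is available. `_demo_assert_extension_usage` is hypothetical and never called.
def _demo_assert_extension_usage():
    import pandas as pd
    left = pd.array([1, 2, None], dtype="Int64")
    right = pd.array([1, 2, None], dtype="Int64")
    # The NA masks are compared first, then the remaining valid values.
    assert_extension_array_equal(left, right)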
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
assert_interval_array_equal(left.array, right.array)
elif (is_extension_array_dtype(left) and not is_categorical_dtype(left) and
is_extension_array_dtype(right) and not is_categorical_dtype(right)):
from flask import Flask, render_template, request, redirect, url_for, session
import os
import io
import math
from collections import defaultdict
from math import sin, cos, sqrt, atan2, radians
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
from pandas.io import sql
from sqlalchemy import create_engine  # required by the pandas to_sql calls below
from pulp import *
import statsmodels.formula.api as smf
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARIMA
import scipy.optimize as optimize
import scipy.stats as st
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import linear_model
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
x = spq['Product_Price']
y = spq['Product_Qty']
num_bins = 5
# np.histogram returns the same bin edges plt.hist would, without drawing a figure
counts_p, pint = np.histogram(x, num_bins)
counts_d, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
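# --- Illustrative sketch (editorial addition, not part of the original app) ---
# A hedged, standalone version of the price-vs-demand binning that elasticopt
# performs above, using np.histogram bin edges with pd.cut and pd.crosstab.
# `_demo_price_demand_bins` is a hypothetical helper and is never called by the routes.
def _demo_price_demand_bins(prices, quantities, num_bins=5):
    import numpy as np
    import pandas as pd
    _, p_edges = np.histogram(prices, bins=num_bins)
    _, q_edges = np.histogram(quantities, bins=num_bins)
    p_bin = pd.cut(prices, bins=p_edges, include_lowest=True)
    q_bin = pd.cut(quantities, bins=q_edges, include_lowest=True)
    # Rows are price ranges, columns are demand ranges, cells are observation counts.
    return pd.crosstab(p_bin, q_bin)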
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
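# --- Illustrative sketch (editorial addition, not part of the original app) ---
# Minimal, hedged sketch of the demand regression fitted in elasticity above,
# run on synthetic data; `_demo_demand_regression` is a hypothetical helper
# and is never called by the routes.
def _demo_demand_regression():
    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf
    rng = np.random.RandomState(0)
    weeks = np.arange(1, 53)
    df = pd.DataFrame({
        'quantityproduct': 200 - 3 * weeks + rng.normal(0, 5, 52),
        'diffpriceprodvscomp': rng.normal(0, 1, 52),
        'promo1': rng.randint(0, 2, 52),
        'promo2': rng.randint(0, 2, 52),
        'week': np.log(weeks),
    })
    res = smf.ols('quantityproduct ~ diffpriceprodvscomp + promo1 + promo2 + week',
                  data=df).fit()
    # res.params holds the intercept and the four coefficients that the route
    # above stores as intercept, diffpriceprodvscomp_param, promo1_param, etc.
    return res.params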
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('sub_grouping_rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenarios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to insufficient load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
result = pd.concat([deer, dear], axis=1, join_axes=[dear.index])
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
+ [5000000*cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation']).astype(str)
#To get the Factory Data
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
#Making Connection to the Database
cur = con.cursor()
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Status.to_sql(con=engine, name='facilityallocation',index=False, if_exists='replace')
cur = con.cursor()
cur1 = con.cursor()
cur.execute("SELECT * FROM `facilityallocation`")
file=cur.fetchall()
dat=pd.DataFrame(file)
lst=dat[['Factory','Customer']]
mlst=[]
names=lst['Factory'].unique().tolist()
for name in names:
lsty=lst.loc[lst.Factory==name]
mlst.append(lsty.values)
data=dat[['Factory','Customer','Allocation']]
sql="SELECT SUM(`Allocation`) AS 'UseCapacity', `Factory` FROM `facilityallocation` GROUP BY `Factory`"
cur1.execute(sql)
file2=cur1.fetchall()
udata=pd.DataFrame(file2)
bdata=factorydata.sort_values(by=['Factory'])
adata=bdata['Capacity']
con.close()
infdata=dat[['Customer','Factory','Allocation']]
infodata=infdata.sort_values(by=['Customer'])
namess=infodata.Customer.unique()
lstyy=[]
for nam in namess:
bb=infodata[infodata.Customer==nam]
comment=bb['Factory']+":"+bb['Allocation']
prin=[nam,str(comment.values).strip('[]')]
lstyy.append(prin)
return render_template('facilityoptimise.html',say=1,lstyy=lstyy,x1=adata.values,x2=udata.values,dat=mlst,loc1=factorydata.values,
loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False),summary=data.to_html(index=False))
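# --- Illustrative sketch (editorial addition, not part of the original app) ---
# Tiny, hedged version of the fixed-charge facility-location model built in
# gmap above (two factories, two customers, made-up numbers). Solving requires
# a PuLP solver such as the bundled CBC. `_demo_facility_lp` is a hypothetical
# helper and is never called by the routes.
def _demo_facility_lp():
    import pulp
    factories = {'F1': {'cap': 100, 'fixed': 500},
                 'F2': {'cap': 80, 'fixed': 300}}
    customers = {'C1': 60, 'C2': 70}
    cost = {('F1', 'C1'): 4, ('F1', 'C2'): 6,
            ('F2', 'C1'): 5, ('F2', 'C2'): 3}
    m = pulp.LpProblem('MinCostFacilityLocation', pulp.LpMinimize)
    ship = pulp.LpVariable.dicts('ship', cost, lowBound=0)
    is_open = pulp.LpVariable.dicts('open', factories, cat='Binary')
    # objective: transport cost plus fixed cost of the factories that are opened
    m += (pulp.lpSum(cost[f, c] * ship[f, c] for f, c in cost)
          + pulp.lpSum(factories[f]['fixed'] * is_open[f] for f in factories))
    # every customer's demand must be met
    for c, dem in customers.items():
        m += pulp.lpSum(ship[f, c] for f in factories) == dem
    # a factory can only ship up to its capacity, and only if it is open
    for f, info in factories.items():
        m += pulp.lpSum(ship[f, c] for c in customers) <= info['cap'] * is_open[f]
    m.solve()
    return {v.name: v.varValue for v in m.variables()}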
#Demand Forecast
@app.route('/demandforecast')
def demandforecast():
return render_template('demandforecast.html')
@app.route("/demandforecastdataimport",methods = ['GET','POST'])
def demandforecastdataimport():
if request.method== 'POST':
global actualforecastdata
flat=request.files['flat'].read()
if len(flat)==0:
return('No Data Selected')
cdat=pd.read_csv(io.StringIO(flat.decode('utf-8')))
actualforecastdata=pd.DataFrame(cdat)
return render_template('demandforecast.html',data=actualforecastdata.to_html(index=False))
@app.route('/demandforecastinput', methods = ['GET', 'POST'])
def demandforecastinput():
if request.method=='POST':
global demandforecastfrm
global demandforecasttoo
global demandforecastinputdata
demandforecastfrm=request.form['from']
demandforecasttoo=request.form['to']
value=request.form['typedf']
demandforecastinputdata=actualforecastdata[(actualforecastdata['Date'] >= demandforecastfrm) & (actualforecastdata['Date'] <= demandforecasttoo)]
if value=='monthly': ##monthly
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
demandforecastinputdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('monthlyforecast'))
if value=='quarterly': ##quarterly
global Quaterdata
dated2 = demandforecastinputdata['Date']
nlst=[]
for var in dated2:
var1 = int(var[5:7])
if var1 >=1 and var1 <4:
varr=var[:4]+'-01-01'
elif var1 >=4 and var1 <7:
varr=var[:4]+'-04-01'
elif var1 >=7 and var1 <10:
varr=var[:4]+'-07-01'
else:
varr=var[:4]+'-10-01'
nlst.append(varr)
nwlst=pd.DataFrame(nlst,columns=['Newyear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=nwlst['Newyear']
Quaterdata=demandforecastinputdata.groupby(['Date']).sum()
Quaterdata=Quaterdata.reset_index()
Quaterdata=Quaterdata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Quaterdata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('quarterlyforecast'))
if value=='yearly': ##yearly
global Yeardata
#copydata=demandforecastinputdata
dated1 = demandforecastinputdata['Date']
lst=[]
for var in dated1:
var1 = var[:4]+'-01-01'
lst.append(var1)
newlst=pd.DataFrame(lst,columns=['NewYear'])
demandforecastinputdata=demandforecastinputdata.reset_index()
demandforecastinputdata['Date']=newlst['NewYear']
Yeardata=demandforecastinputdata.groupby(['Date']).sum()
Yeardata=Yeardata.reset_index()
Yeardata=Yeardata.drop('index',axis=1)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Yeardata.to_sql(con=engine, name='demandforecastinputdata', index=False,if_exists='replace')
return redirect(url_for('yearlyforecast'))
#if value=='weakly': ##weakly
# return redirect(url_for('output4'))
return render_template('demandforecast.html')
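# --- Illustrative sketch (editorial addition, not part of the original app) ---
# A hedged alternative to the manual month-to-quarter/year mapping above, using
# pandas period handling; `_demo_aggregate_demand` is a hypothetical helper and
# is never called by the routes. freq='Q' mirrors the quarterly branch, 'A' the yearly one.
def _demo_aggregate_demand(df, freq='Q'):
    import pandas as pd
    out = df.copy()
    # Map every date to the first day of its quarter/year, then sum the numeric columns.
    out['Date'] = pd.to_datetime(out['Date']).dt.to_period(freq).dt.start_time
    return out.groupby('Date').sum().reset_index()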
@app.route("/monthlyforecast",methods = ['GET','POST'])
def monthlyforecast():
data = pd.DataFrame(demandforecastinputdata)
# container1
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])
# container2
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])
# container3
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])
# container4
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])
# container1
df=a1[['GDP']]
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutput`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutput`")
con.commit()
sql = "INSERT INTO `forecastoutput` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the monthly file and index that with time
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
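# Worked example for the metrics above (editorial comment, illustrative only):
# with y_true = [100, 200] and y_pred = [110, 190],
# ME = mean([-10, 10]) = 0, MAE = mean([10, 10]) = 10, and
# MAPE = mean([10/110, 10/190]) * 100 ≈ 7.2 % -- note this MAPE divides by y_pred,
# not by y_true as in the more common definition.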
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------simple moving average-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Monthly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
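#regression(): fits a linear trend (demand vs. a 0..n-1 time index) per demand column, scores it on the hold-out set V
#and extrapolates the trend noofterms periods ahead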
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting list of attributes (column names) in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the fit for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
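#exponential_smoothing(): single exponential smoothing with smoothing factor alpha; result holds the smoothed
#in-sample values and forecastlist the recursively generated out-of-sample forecasts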
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
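#Assemble a side-by-side summary of the TotalDemand forecasts, one column per selected model, keyed by forecast date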
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutput` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutput',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutput`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('monthly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('monthly.html',sayy=1,smt='Monthly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##quarterly
@app.route("/quarterlyforecast",methods = ['GET','POST'])
def quarterlyforecast():
data = pd.DataFrame(Quaterdata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/3
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date'].astype(str)
tre11 = np.array([])
tres11 = np.append(tre11,tdf)
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/3
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputq`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputq`")
con.commit()
sql = "INSERT INTO `forecastoutputq` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the quarterly data and index it by date
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
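#The quarterly pipeline below mirrors the monthly one above; only the frequency ('3M') and the target tables
#(forecastoutputq / summaryoutputq) differ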
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------Moving Average option (fitted as an MA(1) model, ARIMA order (0,0,1))-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
tdfs['Date']=(tdfs.index).strftime("20%y-%m-%d")
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Quarterly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='3M')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='3M', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y-%m-%d")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting list of attributes (column names) in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the fit for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='3M')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='3M', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y-%m-%d")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputq` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputq',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputq`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('quarterly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('quarterly.html',sayy=1,smt='Quarterly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
##yearly
@app.route("/yearlyforecast",methods = ['GET','POST'])
def yearlyforecast():
data = pd.DataFrame(Yeardata)
a1=data.sort_values(['GDP','TotalDemand'], ascending=[True,True])# container1
a2=data.sort_values(['Pi_Exports','TotalDemand'], ascending=[True,True])# container2
a3=data.sort_values(['Market_Share','TotalDemand'], ascending=[True,True])# container3
a4=data.sort_values(['Advertisement_Expense','TotalDemand'], ascending=[True,True])# container4
# container1
df=a1[['GDP']]/12
re11 = np.array([])
res11 = np.append(re11,df)
df1=a1[['TotalDemand']]
r1 = np.array([])
r11 = np.append(r1, df1)
# top graph
tdf=data['Date']
vari=[]
for var in tdf:
vari.append(var[:4])
tres11 = vari
tr1 = np.array([])
tr11 = np.append(tr1, df1)
# container2
udf=a2[['Pi_Exports']]
ure11 = np.array([])
ures11 = np.append(ure11,udf)
ur1 = np.array([])
ur11 = np.append(ur1, df1)
# container3
vdf=a3[['Market_Share']]/12
vre11 = np.array([])
vres11 = np.append(vre11,vdf)
vr1 = np.array([])
vr11 = np.append(vr1, df1)
# container4
wdf=a4[['Advertisement_Expense']]
wre11 = np.array([])
wres11 = np.append(wre11,wdf)
wr1 = np.array([])
wr11 = np.append(wr1, df1)
if request.method == 'POST':
mov=0
exp=0
reg=0
ari=0
arx=0
till = request.form.get('till')
if request.form.get('moving'):
mov=1
if request.form.get('ESPO'):
exp=1
if request.form.get('regression'):
reg=1
if request.form.get('ARIMA'):
ari=1
if request.form.get('ARIMAX'):
arx=1
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `ftech` (`mov` VARCHAR(1),`exp` VARCHAR(1), `reg` VARCHAR(1),`ari` VARCHAR(1),`arx` VARCHAR(1),`till` VARCHAR(10))")
cur.execute("DELETE FROM `ftech`")
con.commit()
cur.execute("INSERT INTO `ftech` VALUES('"+str(mov)+"','"+str(exp)+"','"+str(reg)+"','"+str(ari)+"','"+str(arx)+"','"+str(till)+"')")
con.commit()
cur.execute("CREATE TABLE IF NOT EXISTS `forecastoutputy`(`Model` VARCHAR(25),`Date` VARCHAR(10),`TotalDemand` VARCHAR(10),`RatioIncrease` VARCHAR(10),`Spain` VARCHAR(10),`Austria` VARCHAR(10),`Japan` VARCHAR(10),`Hungary` VARCHAR(10),`Germany` VARCHAR(10),`Polland` VARCHAR(10),`UK` VARCHAR(10),`France` VARCHAR(10),`Romania` VARCHAR(10),`Italy` VARCHAR(10),`Greece` VARCHAR(10),`Crotia` VARCHAR(10),`Holland` VARCHAR(10),`Finland` VARCHAR(10),`Hongkong` VARCHAR(10))")
con.commit()
cur.execute("DELETE FROM `forecastoutputy`")
con.commit()
sql = "INSERT INTO `forecastoutputy` (`Model`,`Date`,`TotalDemand`,`RatioIncrease`,`Spain`,`Austria`,`Japan`,`Hungary`,`Germany`,`Polland`,`UK`,`France`,`Romania`,`Italy`,`Greece`,`Crotia`,`Holland`,`Finland`,`Hongkong`) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
#read the yearly data and index it by date
df=data.set_index('Date')
split_point =int(0.7*len(df))
D, V = df[0:split_point],df[split_point:]
data=pd.DataFrame(D)
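#The yearly pipeline mirrors the monthly one; it uses annual frequency ('A') and the forecastoutputy / summaryoutputy tables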
#Functions for ME, MAE, MAPE
#ME
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
cur1=con.cursor()
cur1.execute("SELECT * FROM `ftech`")
ftech=pd.DataFrame(cur1.fetchall())
ari=int(ftech['ari'])
arx=int(ftech['arx'])
exp=int(ftech['exp'])
mov=int(ftech['mov'])
reg=int(ftech['reg'])
start_index1=str(D['GDP'].index[-1])
end_index1=str(ftech['till'][0])
#end_index1=indx[:4]
df2 = pd.DataFrame(data=0,index=["ME","MAE","MAPE"],columns=["Moving Average","ARIMA","Exponential Smoothing","Regression"])
if mov==1:
#2---------------Moving Average option (fitted as an MA(1) model, ARIMA order (0,0,1))-------------------------
#################################MovingAverage#######################
list1=list()
def mavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(0,0,1))
results_ARIMA1=model1.fit(disp=0)
# start_index1 = '2017-01-01'
# end_index1 = '2022-01-01' #4 year forecast
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list1.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["Moving Average"].iloc[0]=s
df2["Moving Average"].iloc[1]=so
df2["Moving Average"].iloc[2]=son
s=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(s)+1):
a=s.iloc[j-2]
b=s.iloc[j-1]
ratio_inc.append(int(((b-a)/a)*100))
return list1,ratio_inc
print(data)
Ma_Out,ratio_incma=mavg(data)
dfs=pd.DataFrame(Ma_Out)
tdfs=dfs.T
print(tdfs)
tdfs.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
tdfs['Model']='Moving Average'
tdfs['RatioIncrease']=ratio_incma
dindex=(tdfs.index).strftime("20%y")
tdfs['Date']=(dindex)
tdfs.astype(str)
for index, i in tdfs.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if ari==1:
##--------------min errors--ARIMA (1,0,0)-----------------------------
############################for Total Demand Yearly####################################
list2=list()
def AutoRimavg(data):
m=len(data.columns.tolist())
for i in range(0,m-5):
#Arima Model Fitting
model1=ARIMA(data[data.columns.tolist()[i]].astype(float), order=(1,0,0))
results_ARIMA1=model1.fit(disp=0)
ARIMA_fit1= results_ARIMA1.fittedvalues
forecast2=results_ARIMA1.predict(start=start_index1, end=end_index1)
list2.append(forecast2)
if(i==0):
#ME
s=ME(data['TotalDemand'],ARIMA_fit1)
#MAE
so=MAE(data['TotalDemand'],ARIMA_fit1)
#MAPE
son=MAPE(data['TotalDemand'],ARIMA_fit1)
df2["ARIMA"].iloc[0]=s
df2["ARIMA"].iloc[1]=so
df2["ARIMA"].iloc[2]=son
Ars=pd.DataFrame(forecast2)
ratio_inc=[]
ratio_inc.append(0)
for j in range(2,len(Ars)+1):
As=(Ars.iloc[j-2])
bs=(Ars.iloc[j-1])
ratio_inc.append(int(((bs-As)/As)*100))
return list2,ratio_inc
Arimamodel,ratio_inc=AutoRimavg(data)
Amodel=pd.DataFrame(Arimamodel)
Results=Amodel.T
Results.astype(str)
Results.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
Results['Model']="ARIMA"
Results['RatioIncrease']=ratio_inc
Results['Date']=Results.index.astype(str)
for index, i in Results.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if reg==1:
#Linear Regression
#Regression Modeling
dates=pd.date_range(start_index1,end_index1,freq='A')
lprd=len(dates)
dateofterms= pd.PeriodIndex(freq='A', start=start_index1, periods=lprd+1)
dofterm=dateofterms.strftime("20%y")
Rdate=pd.DataFrame(dofterm)
noofterms=len(dofterm)
def regression(data,V,noofterms):
#Getting length of Data Frame
lenofdf=len(data.columns.tolist())
#Getting list of attributes (column names) in the data frame
listofatr=list()
listofatr=data.columns.tolist()
#making list of pred
pred=pd.DataFrame()
#now run the fit for each demand column
for i in range(0,(lenofdf)-5):
df=pd.DataFrame(data[data.columns.tolist()[i]].reset_index())
xvar=list()
for row in df[listofatr[i]]:
xvar.append(row)
df5=pd.DataFrame(xvar)
yvar=list()
for j in range(0,len(df[listofatr[i]])):
yvar.append(j)
dfss=pd.DataFrame(yvar)
clf = linear_model.LinearRegression()
clf.fit(dfss,df5)
# Make predictions using the testing set
dfv=pd.DataFrame(V[V.columns.tolist()[i]].reset_index())
k=list()
for l in range(len(df[listofatr[i]]),len(df[listofatr[i]])+len(dfv)):
k.append(l)
ks=pd.DataFrame(k)
#Future prediction
predlist=list()
for j in range(len(df[listofatr[i]]),len(df[listofatr[i]])+noofterms):
predlist.append(j)
dataframeoflenofpred=pd.DataFrame(predlist)
dateframeofpred=pd.DataFrame(clf.predict(dataframeoflenofpred))
pred=pd.concat([pred,dateframeofpred],axis=1)
#Accuracy of the model on the hold-out set
y_pred = clf.predict(ks)
if(i==0):
meanerror=ME(dfv[listofatr[i]], y_pred)
mae=MAE(dfv[listofatr[i]], y_pred)
mape=MAPE(dfv[listofatr[i]],y_pred)
df2["Regression"].iloc[0]=meanerror
df2["Regression"].iloc[1]=mae
df2["Regression"].iloc[2]=mape
regp=pd.DataFrame(pred)
ratio_incrr=[]
ratio_incrr.append(0)
for j in range(2,len(regp)+1):
Ra=regp.iloc[j-2]
Rb=regp.iloc[j-1]
ratio_incrr.append(int(((Rb-Ra)/Ra)*100))
return pred,ratio_incrr
monthlyRegression,ratio_incrr=regression(data,V,noofterms)
r=pd.DataFrame(monthlyRegression)
r.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
r['Model']="Regression"
r['Date']=Rdate
r['RatioIncrease']=ratio_incrr
r.astype(str)
for index, i in r.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
if exp==1:
#Exponential Smoothing
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
Edate=pd.DataFrame(dateofterms)
predictonterm=len(Edate)
def exponential_smoothing(series, alpha,predictonterm):
result = [series[0]] # first value is same as series
for i in range(1,len(series)):
result.append(alpha * series[i] + (1 - alpha) * result[i-1])
preds=result[len(series)-1]#pred
actual=series[len(series)-1]#actual
forecastlist=[]
for i in range(0,predictonterm):
forecast=(alpha*actual)+((1-alpha)*preds)
forecastlist.append(forecast)
actual=preds
preds=forecast
return result,forecastlist
def Exponentialmooth(data,alpha,predicterm):
predexp=list()
forecaste=pd.DataFrame()
m=len(data.columns.tolist())
for i in range(0,m-5):
pred,forecasts=exponential_smoothing(data[data.columns.tolist()[i]],0.5,predictonterm)
ss=pd.DataFrame(forecasts)
predexp.append(pred)
forecaste=pd.concat([forecaste,ss],axis=1)
if(i==0):
meanerr=ME(data[data.columns.tolist()[i]],predexp)
meanaverr=MAE(data[data.columns.tolist()[i]],predexp)
mperr=MAPE(data[data.columns.tolist()[i]],predexp)
df2["Exponential Smoothing"].iloc[0]=meanerr
df2["Exponential Smoothing"].iloc[1]=meanaverr
df2["Exponential Smoothing"].iloc[2]=mperr
Exponentials=pd.DataFrame(forecaste)
ratio_incex=[]
ratio_incex.append(0)
for j in range(2,len(Exponentials)+1):
Ea=Exponentials.iloc[j-2]
Eb=Exponentials.iloc[j-1]
ratio_incex.append(int(((Eb-Ea)/Ea)*100))
return forecaste,ratio_incex
fore,ratio_incex=Exponentialmooth(data,0.5,predictonterm)
skf=pd.DataFrame(fore)
skf.columns=["TotalDemand","Spain","Austria","Japan","Hungary","Germany","Polland","UK","France","Romania","Italy","Greece","Crotia","Holland","Finland","Hongkong"]
skf['Model']="Exponential Smoothing"
skf['Date']=Edate
skf['RatioIncrease']=ratio_incex
skf.astype(str)
for index, i in skf.iterrows():
dat = (i['Model'],i['Date'],i['TotalDemand'],i['RatioIncrease'],i['Spain'],i['Austria'],i['Japan'],i['Hungary'],i['Germany'],i['Polland'],i['UK'],i['France'],i['Romania'],i['Italy'],i['Greece'],i['Crotia'],i['Holland'],i['Finland'],i['Hongkong'])
cur.execute(sql,dat)
con.commit()
dates=pd.date_range(start_index1,end_index1,freq='A')
lengthofprd=len(dates)
dateofterm= pd.PeriodIndex(freq='A', start=start_index1, periods=lengthofprd+1)
dateofterms=dateofterm.strftime("20%y")
ss=pd.DataFrame(dateofterms,columns=['Date'])
dataframeforsum=pd.concat([ss])
if mov==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Moving Average'" )
Xmdata = cur.fetchall()
Xmadata = pd.DataFrame(Xmdata)
movsummm=pd.DataFrame(Xmadata)
movsummm.columns=['Moving Average']
dataframeforsum=pd.concat([dataframeforsum,movsummm],axis=1)
if ari==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'ARIMA'" )
Xadata = cur.fetchall()
Xardata = pd.DataFrame(Xadata)
movsumma=pd.DataFrame(Xardata)
movsumma.columns=['ARIMA']
dataframeforsum=pd.concat([dataframeforsum,movsumma],axis=1)
if exp==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Exponential Smoothing'" )
Xedata = cur.fetchall()
Xesdata = pd.DataFrame(Xedata)
exp=pd.DataFrame(Xesdata)
exp.columns=['Exponential Smoothing']
dataframeforsum=pd.concat([dataframeforsum,exp],axis=1)
if reg==1:
cur.execute("SELECT `TotalDemand` FROM `forecastoutputy` WHERE `Model`= 'Regression'" )
Xrdata = cur.fetchall()
Xredata = pd.DataFrame(Xrdata)
regr=pd.DataFrame(Xredata)
regr.columns=['Regression']
dataframeforsum=pd.concat([dataframeforsum,regr],axis=1)
dataframeforsum.astype(str)
from pandas.io import sql
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
dataframeforsum.to_sql(con=engine, name='summaryoutputy',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
df2.to_sql(con=engine2, name='summaryerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `summaryoutputy`")
sdata = cnr.fetchall()
summaryq = pd.DataFrame(sdata)
con.close()
return render_template('yearly.html',summaryq=summaryq.to_html(index=False),sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
return render_template('yearly.html',sayy=1,smt='Yearly',yr1=demandforecastfrm+' to ',yr2=demandforecasttoo,x=res11,y=r11,x1=tres11,y1=tr11,x2=ures11,y2=ur11,x3=vres11,y3=vr11,x4=wres11,y4=wr11)
#############################Dashboard#######################################
#yearly
@app.route('/youtgraph', methods = ['GET','POST'])
def youtgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputy` GROUP BY `Model`")
sfile=cur.fetchall()
global yqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
yqst=qlst.values
con.close()
return render_template('ydashboard.html',qulist=yqst)
@app.route('/youtgraph1', methods = ['GET', 'POST'])
def youtgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputy` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
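#Pad the predicted series with "null" placeholders for the historical dates before the first forecast date,
#so the actual and predicted lines share a single x-axis in the chart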
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date']
index=np.concatenate((indx,edata['Date'].values),axis=0)
yindx=[]
for var in index:
var1 = var[:4]
yindx.append(var1)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputy` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('ydashboard.html',mon=value,qulist=yqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=yindx,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#monthly
@app.route('/moutgraph', methods = ['GET','POST'])
def moutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutput` GROUP BY `Model`")
sfile=cur.fetchall()
global mqst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
mqst=qlst.values
con.close()
return render_template('mdashboard.html',qulist=mqst)
@app.route('/moutgraph1', methods = ['GET', 'POST'])
def moutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutput` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutput` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('mdashboard.html',mon=value,qulist=mqst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
#quarterly
@app.route('/qoutgraph', methods = ['GET','POST'])
def qoutgraph():
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("SELECT `Model` FROM `forecastoutputq` GROUP BY `Model`")
sfile=cur.fetchall()
global qst
qlist=pd.DataFrame(sfile)
qlst=qlist['Model'].astype(str)
qst=qlst.values
con.close()
return render_template('qdashboard.html',qulist=qst)
@app.route('/qoutgraph1', methods = ['GET', 'POST'])
def qoutgraph1():
if request.method=='POST':
value=request.form['item']
qconn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
qcur = qconn.cursor()
qcur.execute("SELECT * FROM `demandforecastinputdata`")
qsdata = qcur.fetchall()
qdata = pd.DataFrame(qsdata)
#graph1
adata=qdata['TotalDemand']
x_axis=qdata['Date'].astype(str)
#predictedgraph1
pcur = qconn.cursor()
pcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
psdata = pcur.fetchall()
edata = pd.DataFrame(psdata)
eedata=edata['TotalDemand'].astype(float)
ldata=eedata.values
nur = qconn.cursor()
nur.execute("SELECT MIN(`Date`) AS 'MIN' FROM `forecastoutputq` WHERE `Model`='"+value+"'")
MIN=nur.fetchone()
pdata=[]
i=0
k=0
a="null"
while(x_axis[i]<MIN['MIN']):
pdata.append(a)
i=i+1
k=k+1
ata=np.concatenate((pdata,ldata),axis=0)
#x axis
fcur = qconn.cursor()
fcur.execute("SELECT `Date` FROM `demandforecastinputdata` WHERE `Date`<'"+MIN['MIN']+"'")
fsdata = fcur.fetchall()
indx = pd.DataFrame(fsdata)
indx=indx['Date'].astype(str).values
index=np.concatenate((indx,edata['Date'].values),axis=0)
#bargraph
bcur = qconn.cursor()
bcur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
bsdata = bcur.fetchall()
bdata = pd.DataFrame(bsdata)
btdf=bdata['Date'].astype(str)
btre11 = np.array([])
btres11 = np.append(btre11,btdf)
b1tdf1=bdata[['Spain']] #spain
b1tr1 = np.array([])
b1tr11 = np.append(b1tr1, b1tdf1)
b2tdf1=bdata[['Austria']] #austria
b2tr1 = np.array([])
b2tr11 = np.append(b2tr1, b2tdf1)
b3tdf1=bdata[['Japan']] #japan
b3tr1 = np.array([])
b3tr11 = np.append(b3tr1, b3tdf1)
b4tdf1=bdata[['Hungary']] #hungary
b4tr1 = np.array([])
b4tr11 = np.append(b4tr1, b4tdf1)
b5tdf1=bdata[['Germany']] #germany
b5tr1 = np.array([])
b5tr11 = np.append(b5tr1, b5tdf1)
b6tdf1=bdata[['TotalDemand']] #total
b6tr1 = np.array([])
b6tr11 = np.append(b6tr1, b6tdf1)
#comparisonbar
ccur = qconn.cursor()
ccur.execute("SELECT * FROM `forecastoutputq` WHERE `Model`='"+value+"'")
csdata = ccur.fetchall()
cdata = pd.DataFrame(csdata)
ctdf=cdata['Date'].astype(str)
ctre11 = np.array([])
ctres11 = np.append(ctre11,ctdf)
c1tdf1=cdata[['RatioIncrease']] #ratioincrease
c1tr1 = np.array([])
c1tr11 = np.append(c1tr1, c1tdf1)
qcur.execute("SELECT * FROM `summaryerror`")
sdata = qcur.fetchall()
mape = pd.DataFrame(sdata)
qconn.close()
return render_template('qdashboard.html',mon=value,qulist=qst,mape=mape.to_html(index=False),say=1,pdata=ata,adata=adata.values,x_axis=index,frm=len(qdata)-1,to=k,x13=btres11,x14=ctres11,y13=b1tr11,y14=b2tr11,y15=b3tr11,y16=b4tr11,y17=b5tr11,y18=b6tr11,y19=c1tr11)
@app.route("/yearlysimulation",methods = ['GET','POST'])
def yearlysimulation():
if request.method == 'POST':
gdp=0
pi=0
ms=0
adv=0
gdp_dis=request.form.get('gdp_dis')
pi_dis=request.form.get('pi_dis')
ms_dis=request.form.get('ms_dis')
adv_dis=request.form.get('adv_dis')
min=request.form.get('min')
max=request.form.get('max')
mue=request.form.get('mue')
sig=request.form.get('sig')
cval=request.form.get('cval')
min1=request.form.get('min1')
max1=request.form.get('max1')
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
cval1=request.form.get('cval1')
min2=request.form.get('min2')
max2=request.form.get('max2')
mue2=request.form.get('mue2')
sig2=request.form.get('sig2')
cval2=request.form.get('cval2')
min3=request.form.get('min3')
max3=request.form.get('max3')
mue3=request.form.get('mue3')
sig3=request.form.get('sig3')
cval3=request.form.get('cval3')
itr= int(request.form.get('itr'))
frm = request.form.get('from')
sfrm=int(frm[:4])
to = request.form.get('to')
sto=int(to[:4])
kwargs={}
atrtable=[]
if request.form.get('gdp'):
gdp=1
atrtable.append('Gdp')
if gdp_dis == 'gdp_dis1':
min=request.form.get('min')
max=request.form.get('max')
kwargs['Gdp_dis']='Uniform'
kwargs['gdpvalues']=[min,max]
if gdp_dis == 'gdp_dis2':
mue=request.form.get('mue')
sig=request.form.get('sig')
kwargs['Gdp_dis']='Normal'
kwargs['gdpvalues']=[mue,sig]
if gdp_dis == 'gdp_dis3':
kwargs['Gdp_dis']='Random'
pass
if gdp_dis == 'gdp_dis4':
cval=request.form.get('cval')
kwargs['Gdp_dis']='Constant'
kwargs['gdpvalues']=[cval]
if request.form.get('pi'):
pi=1
atrtable.append('Pi')
if pi_dis == 'pi_dis1':
min1=request.form.get('min1')
max1=request.form.get('max1')
kwargs['Pi_dis']='Uniform'
kwargs['pivalues']=[min1,max1]
if pi_dis == 'pi_dis2':
mue1=request.form.get('mue1')
sig1=request.form.get('sig1')
kwargs['Pi_dis']='Normal'
kwargs['pivalues']=[mue1,sig1]
if pi_dis == 'pi_dis3':
kwargs['Pi_dis']='Random'
pass
if pi_dis == 'pi_dis4':
cval1=request.form.get('cval1')
kwargs['Pi_dis']='Constant'
kwargs['pivalues']=[cval1]
if request.form.get('ms'):
ms=1
atrtable.append('Ms')
if ms_dis == 'ms_dis1':
min=request.form.get('min2')
max=request.form.get('max2')
kwargs['Ms_dis']='Uniform'
kwargs['msvalues']=[min2,max2]
if ms_dis == 'ms_dis2':
mue=request.form.get('mue2')
sig=request.form.get('sig2')
kwargs['Ms_dis']='Normal'
kwargs['msvalues']=[mue2,sig2]
if ms_dis == 'ms_dis3':
kwargs['Ms_dis']='Random'
pass
if ms_dis == 'ms_dis4':
cval=request.form.get('cval2')
kwargs['Ms_dis']='Constant'
kwargs['msvalues']=[cval2]
if request.form.get('adv'):
adv=1
atrtable.append('Adv')
if adv_dis == 'adv_dis1':
min=request.form.get('min3')
max=request.form.get('max3')
kwargs['Adv_dis']='Uniform'
kwargs['advvalues']=[min3,max3]
if adv_dis == 'adv_dis2':
mue=request.form.get('mue3')
sig=request.form.get('sig3')
kwargs['Adv_dis']='Normal'
kwargs['advvalues']=[mue3,sig3]
if adv_dis == 'adv_dis3':
kwargs['Adv_dis']='Random'
pass
if adv_dis == 'adv_dis4':
cval=request.form.get('cval3')
kwargs['Adv_dis']='Constant'
kwargs['advvalues']=[cval3]
#print(kwargs)
#print(atrtable)
con = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS `stech` (`gdp` VARCHAR(1),`pi` VARCHAR(1), `ms` VARCHAR(1),`adv` VARCHAR(1),`itr` VARCHAR(5),`sfrm` VARCHAR(10),`sto` VARCHAR(10))")
cur.execute("DELETE FROM `stech`")
con.commit()
cur.execute("INSERT INTO `stech` VALUES('"+str(gdp)+"','"+str(pi)+"','"+str(ms)+"','"+str(adv)+"','"+str(itr)+"','"+str(sfrm)+"','"+str(sto)+"')")
con.commit()
data = pd.DataFrame(Yeardata)
#print(data)
data.columns
xvar=pd.concat([data['GDP'],data['Pi_Exports'],data['Market_Share'],data['Advertisement_Expense']],axis=1)
yvar=pd.DataFrame(data['TotalDemand'])
regr = linear_model.LinearRegression()
regr.fit(xvar,yvar)
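#Multiple linear regression of TotalDemand on GDP, Pi_Exports, Market_Share and Advertisement_Expense,
#fitted on the yearly history; sim() below reuses this model to predict demand for the simulated inputs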
# predict=regr.predict(xvar)
#Error Measures
def ME(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(y_true - y_pred)
#MAE
def MAE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs(y_true - y_pred))
#MAPE
def MAPE(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_pred)) * 100
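#sim(): Monte Carlo simulation - for each iteration it draws GDP/Pi/Adv/Ms values for every year in the horizon
#from the chosen distributions (unselected attributes default to 0), predicts demand with the fitted regression,
#and computes ME/MAE/MAPE against the actual demand of the overlapping years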
def sim(iteration,data,startyear,endyear,atrtable,Gdp_dis=None,gdpvalues=None,Adv_dis=None,advvalues=None,Ms_dis=None,msvalues=None,Pi_dis=None,pivalues=None):
preddata=pd.DataFrame()
simdata=pd.DataFrame()
#Errordf=pd.DataFrame()
Errormsr=pd.DataFrame()
date=pd.date_range(start=pd.datetime(startyear, 1, 1), end=pd.datetime(endyear+1, 1, 1),freq='A')
date=pd.DataFrame(date.strftime("%Y"))
#Fetch the actual demand data for the years that fall inside the simulation horizon
m=len(date)
Arrayofdates=data['Date']
vari=[]
for var in Arrayofdates:
vari.append(var[:4])
Arrayofdates=pd.DataFrame(vari)
dates=[]
Fetchdata=[]
for i in range(0,m):
years=date.loc[i]
for j in range(0,len(Arrayofdates)):
if int(Arrayofdates.loc[j])==int(years):
da=data['TotalDemand'].loc[j]
Fetchdata.append(da) #Gives Data In the Given Range That we have actually
dates.extend(years) #Gives Years that we have data
for i in range(0,iteration):
df=pd.DataFrame()
#for The Gdp
S='flag'
for row in atrtable:
if row=='Gdp':
S='Gdp'
if S=='Gdp':
for row in Gdp_dis:
if row=='Normal':
gdpdf=pd.DataFrame(np.random.normal(gdpvalues[0],gdpvalues[1],m))
elif row=='Uniform':
gdpdf=pd.DataFrame(np.random.uniform(gdpvalues[0],gdpvalues[1],m))
elif row=='Constant':
gdpdf=pd.DataFrame(np.random.choice([gdpvalues[0]],m))
else:
gdpdf=pd.DataFrame(np.random.uniform(-4,4,m))
else:
gdpdf=pd.DataFrame(np.random.uniform(0,0,m))
# for the pi dataframe
O='flag'
for row in atrtable:
if row=='Pi':
O='Pi'
if O=='Pi':
for row in Pi_dis:
if row=='Normal':
pidf=pd.DataFrame(np.random.normal(pivalues[0],pivalues[1],m))
elif row=='Uniform':
pidf=pd.DataFrame(np.random.uniform(pivalues[0],pivalues[1],m))
elif row=='Constant':
pidf=pd.DataFrame(np.random.choice([pivalues[0]],m))
else:
pidf=pd.DataFrame(np.random.random_integers(80,120,m))
else:
pidf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Adv Dataframe
N='flag'
for row in atrtable:
if row=='Adv':
N='Adv'
if N=='Adv':
for row in Adv_dis:
if row=='Normal':
advdf=pd.DataFrame(np.random.normal(advvalues[0],advvalues[1],m))
elif row=='Uniform':
advdf=pd.DataFrame(np.random.uniform(advvalues[0],advvalues[1],m))
elif row=='Constant':
advdf=pd.DataFrame(np.random.choice([advvalues[0]],m))
else:
advdf=pd.DataFrame(np.random.random_integers(500000,1000000,m))
else:
advdf=pd.DataFrame(np.random.uniform(0,0,m))
#for the Ms dataframe
U='flag'
for row in atrtable:
if row=='Ms':
U='Ms'
if U=='Ms':
for row in Ms_dis:
if row=='Normal':
msdf=pd.DataFrame(np.random.normal(msvalues[0],msvalues[1],m))
elif row=='Uniform':
msdf=pd.DataFrame(np.random.uniform(msvalues[0],msvalues[1],m))
elif row=='Constant':
msdf=pd.DataFrame(np.random.choice([msvalues[0]],m))
else:
msdf=pd.DataFrame(np.random.uniform(0.1,0.5,m))
else:
msdf=pd.DataFrame(np.random.uniform(0,0,m))
#Concatenating All the dataframes for Simulation Data
df=pd.concat([gdpdf,pidf,msdf,advdf],axis=1)
simid=pd.DataFrame(np.random.choice([i+1],m))
dd=pd.concat([simid,gdpdf,pidf,advdf,msdf],axis=1)
dd.columns=['Year','Gdp','Pi','Adv','Ms']
simdata=pd.concat([simdata,dd],axis=0)
#Predict demand with the fitted regression and collect the results in preddata
dfs=pd.DataFrame(regr.predict(df))
datatable=pd.concat([simid,date,dfs],axis=1)
datatable.columns=['simid','Year','Total_Demand(Tonnes)']
preddata=pd.concat([datatable,preddata],axis=0)
datas=list()
#Get the predictions for the dates that also have actual data
# print(datatable)
for row in dates:
# print(dates)
datas.extend(datatable.loc[datatable['Year'] ==row, 'Total_Demand(Tonnes)'])
#compare the flat lists directly so the arrays line up element-wise
me=ME(Fetchdata,datas)
mae=MAE(Fetchdata,datas)
mape=MAPE(Fetchdata,datas)
dfe=pd.DataFrame([me,mae,mape],index=['ME','MAE','MAPE']).T
Errormsr=pd.concat([Errormsr,dfe],axis=0).reset_index(drop=True)
return preddata,simdata,Errormsr
preddata,simdata,Errormsr=sim(itr,data,sfrm,sto,atrtable,**kwargs)
engine = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
preddata.to_sql(con=engine, name='predicteddata',index=False, if_exists='replace')
engine2 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
simdata.to_sql(con=engine2, name='simulationdata',index=False, if_exists='replace')
con.commit()
engine3 = create_engine("mysql+pymysql://{user}:{pw}@localhost/{db}".format(user="root",pw="",db="inventory_management"))
Errormsr.to_sql(con=engine3, name='simerror',index=False, if_exists='replace')
con.commit()
cnr=con.cursor()
cnr.execute("SELECT * FROM `simerror`")
sdata = cnr.fetchall()
simerror = pd.DataFrame(sdata)
con.close()
return render_template('ysimulation.html',sayy=1,simerror=simerror.to_html(index=False))
return render_template('ysimulation.html')
##PROCURMENT PLANNING
@app.route('/procurementplanning')
def procurementplanning():
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerate", methods=['GET','POST'])
def criteriagenerate():
if request.method == 'POST':
global cnmbr
global vnmbr
cnmbr = int(request.form['cnmbr'])
vnmbr = int(request.form['vnmbr'])
if cnmbr == 0 or vnmbr==0:
return render_template('criterianumberask.html',warning='Data Invalid')
cmainlist=[]
global cnames
cnames = []
for i in range (1,cnmbr+1):
lst=[]
name='cname'+str(i)
lst.append(i)
lst.append(name)
cmainlist.append(lst)
cnames.append(name)
vmainlist=[]
global vnames
vnames = []
for i in range (1,vnmbr+1):
lst=[]
name='vname'+str(i)
lst.append(i)
lst.append(name)
vmainlist.append(lst)
vnames.append(name)
return render_template('vendorselection_criteriagenerate.html',cmainlist=cmainlist,vmainlist=vmainlist)
return render_template('vendorselection_criterianumberask.html')
@app.route("/criteriagenerated", methods=['GET','POST'])
def criteriagenerated():
if request.method == 'POST':
global criterianames
criterianames=[]
for name in cnames:
criterianame = request.form[name]
criterianames.append(criterianame)
global vendornames
vendornames=[]
for name in vnames:
vendorname = request.form[name]
vendornames.append(vendorname)
mcrlst=[]
cn=len(criterianames)
k=1
global maincriteriaoption
maincriteriaoption=[]
global maincritriacri
maincritriacri=[]
for i in range(cn-1):
for j in range (i+1,cn):
cri='criteriaorder'+str(k)
opt='coption'+str(k)
crlst=[k,cri,criterianames[i],criterianames[j],opt]
mcrlst.append(crlst)
k=k+1
maincriteriaoption.append(opt)
maincritriacri.append(cri)
mvrlst=[]
vn=len(vendornames)
k=1
global mainvendoroption
mainvendoroption=[]
global mainvendorcri
mainvendorcri=[]
for z in criterianames:
mvrlst1=[]
vcri=[]
vopt=[]
for i in range(vn-1):
for j in range (i+1,vn):
cri='vendororder'+z+str(k)
opt='voption'+z+str(k)
vrlst=[k,cri,vendornames[i],vendornames[j],opt]
mvrlst1.append(vrlst)
k=k+1
vcri.append(cri)
vopt.append(opt)
mvrlst.append(mvrlst1)
mainvendorcri.append(vcri)
mainvendoroption.append(vopt)
return render_template('vendorselection_maincriteria.html',mcrlst=mcrlst,mvrlst=mvrlst)
return render_template('vendorselection_criteriagenerated.html')
def tablecreator(imp,val,crit):
n=len(imp)
for i in range(n):
if imp[i]==1:
val[i]=float(1/val[i])
fdata=pd.DataFrame(columns=[crit],index=[crit])
i=0
k=0
for index in fdata.index:
j=0
for columns in fdata.columns:
if i==j:
fdata[index][columns]=1
if i<j:
fdata[index][columns]=round((float(val[k])),2)
fdata[columns][index]=round((1/val[k]),2)
k=k+1
j=j+1
i=i+1
return fdata
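# Illustrative note (not executed): tablecreator builds a reciprocal
# pairwise-comparison matrix in the AHP style from the questionnaire answers.
# For two criteria and a single answer v the diagonal is 1 and the two
# off-diagonal cells hold v and 1/v; when imp[i]==1 the answer direction is
# flipped first by replacing val[i] with 1/val[i].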
@app.route("/criteriaread", methods=['GET','POST'])
def criteriaread():
if request.method == 'POST':
importances = []
values = []
for name1 in maincritriacri:
imp = int(request.form[name1])
importances.append(imp)
for name2 in maincriteriaoption:
val = int(request.form[name2])
values.append(val)
#global maincriteriadata
maincriteriadata=tablecreator(importances,values,criterianames)
mainimportances=[]
for crioption in mainvendorcri:
importance=[]
for option1 in crioption:
impc = int(request.form[option1])
importance.append(impc)
mainimportances.append(importance)
mainvalues=[]
for vendoroption in mainvendoroption:
vvalues=[]
for option2 in vendoroption:
valuev = int(request.form[option2])
vvalues.append(valuev)
mainvalues.append(vvalues)
maindf=[]
for z in range(len(criterianames)):
df=tablecreator(mainimportances[z],mainvalues[z],vendornames)
maindf.append(df)
dictmain={'crit':maincriteriadata}
names=criterianames
dfs=maindf
dictionary=dict((n,d) for (n,d) in zip(names,dfs))
def ahpmain(dictmain):
global wt_Crit
wt_Crit=[]
key=[]
key=list(dictmain.keys())
for i in key:
Crit=np.dot(dictmain[i],dictmain[i])
row_sum=[]
for j in range(len(Crit)):
row_sum.append(sum(Crit[j]))
wt_Crit.append([s/sum(row_sum) for s in row_sum])
Crit=[]
return wt_Crit
def ahp(dictmain,dictionary):
global output
main= ahpmain(dictmain)
submain= ahpmain(dictionary)
dd=pd.DataFrame(submain).T
df=pd.DataFrame(main).T
output=np.dot(dd,df)
return output,dd
yaxis,dd=ahp(dictmain,dictionary)
yax=pd.DataFrame(yaxis,index=vendornames,columns=['Score']).sort_values('Score',ascending=False).T
ynames=yax.columns
yval=yax.T.values
dd.index=vendornames
dd.columns=names
dd=dd.T
opq23=[]
for column in dd.columns:
opq21=[]
opq22=[]
opq21.append(column)
for val in dd[column]:
opq22.append(val)
opq21.append(opq22)
opq23.append(opq21)
return render_template('vendorselection_ahp_final_output.html',ynames=ynames,yval=yval,dd=opq23,names=names)
return render_template('vendorselection_criteriagenerated.html')
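# Note on the AHP weighting above (illustrative, not executed): ahpmain()
# approximates the principal eigenvector of each pairwise matrix A by
# squaring it (np.dot(A, A)), summing each row and normalising the row sums
# to 1. For A = [[1, 3], [1/3, 1]] the row sums of A.A are 8 and 8/3, giving
# weights 0.75 and 0.25. ahp() then multiplies the per-criterion vendor
# weights (dd) by the criteria weights (df) to obtain one overall score per
# vendor, which is what the final output page ranks.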
#DETERMINISTIC STARTS
@app.route("/spt")
def spt():
return render_template('SinglePeriod.html')
@app.route("/ppbreak")
def ppbreak():
return render_template('pbreak.html')
@app.route('/pbrk', methods=['GET','POST'])
def pbrk():
return render_template('pbrk.html')
@app.route('/eoq', methods=['GET','POST'])
def eoq():
##setUpCost::setting up cost prior(>>setUpCost;<<moving rate)
AnnulaUnitsDemand=100##purchase demand of product per year
FixedCost=500 ##cost fixed for the product
AnnHoldingcost=0.25 ##annual holding cost rate (fraction of unit cost per year)
UnitCost=445 ##purchasing cost
LeadTime=10 ##time b/w initiation and completion of a production process.
SafetyStock=100##extra stock
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
FixedCost=float(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=float(UnitCost)
LeadTime=float(LeadTime)
SafetyStock=float(SafetyStock)
sgap=1
pgap=1
HoldingCost=AnnHoldingcost*UnitCost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))*sgap),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=round((EOQ*.75),0)
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totHoldCost+totOrderCost)
while (count < (EOQ*2)):
qtylist1.append(count)
hclist.append(round((count/2*HoldingCost),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round((count/2*HoldingCost+AnnulaUnitsDemand/count*FixedCost),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
# sstock=int(math.sqrt((LeadTime^2)+(int(ReorderPoint)^2)))
return render_template('eoq.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,LeadTime=LeadTime,
SafetyStock=SafetyStock)
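# Worked example for the defaults above (illustrative only): with annual
# demand D=100, fixed order cost K=500 and holding cost
# h = 0.25 * 445 = 111.25 per unit per year,
# EOQ = sqrt(2*D*K/h) = sqrt(100000/111.25) ~ 29.98, i.e. roughly
# 100/30 ~ 3.3 orders per year. The qtylist1/hclist/sclist/tclist arrays
# simply sample the holding-, ordering- and total-cost curves around that
# optimum for the chart.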
########################EEEEppppppppppQQQQQQ############
########################EEEEppppppppppQQQQQQ############
@app.route('/eproduction', methods=['GET','POST'])
def eproduction():
AnnulaUnitsDemand=100
Prodrate=125
FixedCost=500
AnnHoldingcost=0.1
UnitCost=25000
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
Prodrate=request.form['Prodrate']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
Prodrate=int(Prodrate)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
if(Prodrate<=AnnulaUnitsDemand):
return render_template('eproduction.html',warning='Production rate should not be less than Annual Demand',
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
pgap=round((1-(AnnulaUnitsDemand/Prodrate)),2)
HoldingCost=float(AnnHoldingcost*UnitCost)
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),2)
REOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost*pgap))),0)
totOrderCost=round((FixedCost*AnnulaUnitsDemand/EOQ),2)
totHoldCost=round(((HoldingCost*EOQ*pgap)/2),2)
TotalCost=round((totOrderCost+totHoldCost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count=EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
tclist.append(totOrderCost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int(count))
hclist.append(round((count/2*HoldingCost*pgap),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
tclist.append(round(((count/2*HoldingCost*pgap+AnnulaUnitsDemand/count*FixedCost)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eproduction.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,Prodrate=Prodrate,
LeadTime=LeadTime,SafetyStock=SafetyStock
)
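# Note (illustrative): this route implements the economic production
# quantity (EPQ) variant of EOQ. Stock builds up only at rate (P - D), so
# the holding-cost term is scaled by pgap = 1 - D/P and
# EPQ = sqrt(2*D*K / (h * (1 - D/P))). With the defaults D=100, P=125,
# K=500 and h = 0.1 * 25000 = 2500 this gives sqrt(100000/500) ~ 14.1.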
######################EEEEppppppppppQQQQQQ############
######################EEEEppppppppppQQQQQQ############
@app.route('/eoq_backorders', methods=['GET','POST'])
def eoq_backorders():
AnnulaUnitsDemand=12000
shortcost=1.1
FixedCost=8000
AnnHoldingcost=0.3
UnitCost=1
LeadTime=10
SafetyStock=100
if request.method == 'POST':
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
shortcost=request.form['shortcost']
FixedCost=request.form['FixedCost']
AnnHoldingcost=request.form['AnnHoldingcost']
UnitCost=request.form['UnitCost']
LeadTime=request.form['LeadTime']
SafetyStock=request.form['SafetyStock']
AnnulaUnitsDemand=int(AnnulaUnitsDemand)
shortcost=int(shortcost)
FixedCost=int(FixedCost)
AnnHoldingcost=float(AnnHoldingcost)
UnitCost=int(UnitCost)
LeadTime=int(LeadTime)
SafetyStock=int(SafetyStock)
HoldingCost=float(AnnHoldingcost*UnitCost)
sgap=(shortcost+HoldingCost)/shortcost
EOQ=round((math.sqrt((2*AnnulaUnitsDemand*FixedCost)/HoldingCost))*(math.sqrt(sgap)),2)
REOQ=round(math.sqrt((2*AnnulaUnitsDemand*FixedCost)/(HoldingCost)*sgap),0)
totbackorder=EOQ*(HoldingCost/(shortcost+HoldingCost))
totOrderCost=round(((FixedCost*AnnulaUnitsDemand)/EOQ),2)
totHoldCost=round(((HoldingCost*((EOQ-totbackorder)**2))/(2*EOQ)),2)
totshortcost=round((shortcost*(totbackorder**2)/(2*EOQ)),2)
TotalCost=round((totOrderCost+totHoldCost+totshortcost),2)
NumOrders=round((AnnulaUnitsDemand/EOQ),2)
OrderTime=round((365/NumOrders),2)
ReorderPoint=round((((AnnulaUnitsDemand/365)*LeadTime)+SafetyStock),0)
count= EOQ*.75
qtylist1=[]
hclist=[]
sclist=[]
mtlist=[]
shlist=[]
tclist=[]
while (count < EOQ):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
qtylist1.append(EOQ)
hclist.append(totHoldCost)
sclist.append(totOrderCost)
shlist.append(totshortcost)
tclist.append(totOrderCost+totshortcost+totHoldCost)
while (count < (EOQ*1.7)):
qtylist1.append(int((count)))
hclist.append(round(((HoldingCost*((count-totbackorder)**2))/(2*count)),2))
sclist.append(round((AnnulaUnitsDemand/count*FixedCost),2))
mtlist.append(round((AnnulaUnitsDemand*UnitCost),2))
shlist.append(round((shortcost*((totbackorder)**2)/(2*count)),2))
tclist.append(round(((((HoldingCost*((count-totbackorder)**2))/(2*count))+AnnulaUnitsDemand/count*FixedCost)+shortcost*((totbackorder)**2)/(2*count)),2))
count +=2
val=0
for i in range(len(tclist)):
if(EOQ==qtylist1[i]):
val=i
return render_template('eoq_backorders.html',NumOrders=NumOrders,OrderTime=OrderTime,
ReorderPoint=ReorderPoint,HoldCost=totHoldCost,TotalCost=TotalCost,
EOQ=EOQ,REOQ=REOQ,
shlist=shlist,sclist=sclist,hclist=hclist,tclist=tclist,val=val,qtylist1=qtylist1,
AnnulaUnitsDemand=AnnulaUnitsDemand,FixedCost=FixedCost,
AnnHoldingcost=AnnHoldingcost,UnitCost=UnitCost,shortcost=shortcost,
LeadTime=LeadTime,SafetyStock=SafetyStock)
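# Note (illustrative): this is the EOQ model with planned backorders.
# sgap = (b + h)/b inflates the order quantity, Q = sqrt(2*D*K/h) * sqrt(sgap),
# the maximum backlog is Q * h/(b + h), and ordering, holding and shortage
# costs are sampled around the optimum for the chart, as in /eoq.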
#################pbreak######################
@app.route("/pbreak_insert", methods=['GET','POST'])
def pbreak_insert():
if request.method == 'POST':
quantity = request.form.getlist("quantity[]")
price = request.form.getlist("price[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("CREATE TABLE IF NOT EXISTS `pbreaktable` (quantity int(8),price int(8))")
curr.execute("DELETE FROM `pbreaktable`")
conn.commit()
say=1
for i in range(len(quantity)):
quantity_clean = quantity[i]
price_clean = price[i]
if quantity_clean and price_clean:
curr.execute("INSERT INTO `pbreaktable`(`quantity`,`price`) VALUES('"+quantity_clean+"','"+price_clean+"')")
conn.commit()
else:
say=0
if say==0:
message="Some values were not inserted!"
else:
message="All values were inserted!"
return(message)
@app.route('/view', methods=['GET','POST'])
def view():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
return render_template('pbrk.html',username=username,ress =ress.to_html())
@app.route('/pbreakcalculate', methods=['GET','POST'])
def pbreakcalculate():
AnnulaUnitsDemand=10
FixedCost=1
AnnHoldingcost=0.1
UnitCost=445
LeadTime=10
SafetyStock=100
if request.method == 'POST':
if request.form['AnnulaUnitsDemand']:
AnnulaUnitsDemand= request.form['AnnulaUnitsDemand']
AnnulaUnitsDemand=float(AnnulaUnitsDemand)
if request.form['FixedCost']:
FixedCost=request.form['FixedCost']
FixedCost=float(FixedCost)
if request.form['AnnHoldingcost']:
AnnHoldingcost=request.form['AnnHoldingcost']
AnnHoldingcost=float(AnnHoldingcost)
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_classification',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curr = conn.cursor()
curr.execute("SELECT * FROM `pbreaktable`")
res = curr.fetchall()
ress=pd.DataFrame(res)
import pandas as pd
from paths import RAW_PATH, TREAT_PATH
from datetime import datetime as dt
# from unidecode import unidecode
import numpy as np
def normalize_cols(df):
return pd.Series(df).str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8').str.upper()
def create_region_id(city, state):
"""
Join and clean city + state names to cross databases.
"""
if city:
city_state = (state + ' ' + city.upper()).replace(' ', '_').replace("'", '')
return city_state
return ''
def treat_covid19br(filepath, to_path):
df = pd.read_csv(RAW_PATH / filepath)
# Remove total rows
df = df[df['state'] != 'TOTAL']
# Fix columns
df = df.rename({'totalCases': 'confirmed', 'newCases': 'new_confirmed'}, axis=1)
df['place_type'] = 'city'
# Create id for join
df['region_id'] = df['city'].str.replace('/', ' ')
df['region_id'] = normalize_cols(df['region_id'])
# Get only last day data for each city
df = df.drop_duplicates(subset=['city'], keep='first')
df = df.sort_values(by='confirmed', ascending=False)
cols = ['region_id', 'city', 'place_type', 'date', 'confirmed']
# Save treated dataset
df[cols].to_csv(TREAT_PATH / to_path)
return df[cols]
def treat_brasilio(df):
# df = pd.read_csv(RAW_PATH / filepath)
# Fix city names
df['city'] = df['city'].fillna('').str.replace('\'', '')
# Create id for join
df['region_id'] = df.apply(lambda row: create_region_id(row['city'], row['state']), axis=1)
df['region_id'] = normalize_cols(df['region_id'])
# get only the cities
mask = ((df['place_type']=='city') & (df['city_ibge_code'].notnull()))
df = df[mask].sort_values(by = ['region_id','date'])
df['confirmed_shift'] = df['confirmed'].shift(1)
df['city_ibge_code_shift'] = df['city_ibge_code'].shift(1)
df['confirmed_shift'] = np.where(df['city_ibge_code_shift']!=df['city_ibge_code'], np.nan , df['confirmed_shift'])
df['new_cases'] = df['confirmed'] - df['confirmed_shift']
df['new_cases'] = np.where(df['confirmed_shift'].isnull(), df['confirmed'] , df['new_cases'])
mask = ((df['new_cases']!=0) & (df['new_cases']>0) & (df['is_last']==True))
df['update'] = np.where(mask,True,False)
mask = df['is_last']==True
df = df[mask].rename(columns={"confirmed":'confirmed_real','deaths':'deaths_real'})
cols = ['region_id', 'date', 'confirmed_real','deaths_real','update']
return df[cols]
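# Illustrative usage sketch (not executed), assuming a brasil.io style case
# export has already been downloaded (the file name is hypothetical):
#   df_cases = treat_brasilio(pd.read_csv(RAW_PATH / 'caso.csv'))
# Only the latest city-level rows are kept; new_cases is the per-city
# difference of the cumulative 'confirmed' column and 'update' flags a
# positive change on the latest snapshot.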
def treat_sus(filepath, to_path):
df = pd.read_csv(RAW_PATH / filepath)
# Create id for join
df['region_id'] = df.apply(lambda row: create_region_id(row['municipio'], row['uf']), axis=1)
df['region_id'] = normalize_cols(df['region_id'])
# Add available respirator column
df['ventiladores_disponiveis'] = df['ventiladores_existentes'] - df['ventiladores_em_uso']
cols = ['region_id', 'municipio', 'uf', 'populacao', 'quantidade_leitos', 'ventiladores_existentes']
df = df[cols].fillna(0)
# remove apostoflo
df['municipio'] = df['municipio'].str.replace('\'', '')
df['region_id'] = df['region_id'].str.replace('\'', '')
# Get the information for states
df_states = df.groupby(by='uf', as_index=False).sum()
df_states['municipio'] = df_states['uf']
df_states['region_id'] = df_states['uf']
# Put states in same order as df
cols = df.columns
df_states = df_states[cols]
# Concat states information with sus information
df = pd.concat([df_states,df],axis=0)
# Save treated dataset
df.to_csv(TREAT_PATH / to_path)
return df
def treat_cities_cases(df_cases_brasilio,sus_cap,sus_regions):
cities_cases = pd.merge(sus_cap, df_cases_brasilio, on='region_id', how='left')
import os
import pandas as pd
from collections import Counter
import numpy as np
dataPath = './data/'
fileMergedName = 'street_name_popular.csv'
path = 'street_name/'
files = os.listdir(dataPath + path)
## merge all the data
Data = pd.DataFrame()
import os
import pandas
from c3x.data_loaders import configfileparser, nextgen_loaders
from c3x.data_statistics import statistics as stats
# Reads a config file to produce a dictionary that can be handed over to functions
config = configfileparser.ConfigFileParser("config/config_nextGen_stats.ini")
data_paths = config.read_data_path()
batch_info = config.read_batches()
measurement_types = config.read_data_usage()
# Create a nextGen data object that has working paths and can be sliced using batches
# it might be appropriate for the example to make the batches smaller, however that may
# increase computing time,
# the next row can be commented if data was processed prior to running this script
nextgen = nextgen_loaders.NextGenData(data_name='NextGen',
source=data_paths["source"],
batteries=data_paths["batteries"],
solar=data_paths["solar"],
node=data_paths["node"],
loads=data_paths["loads"],
results=data_paths["results"],
stats = data_paths["stats"],
number_of_batches=batch_info["number_of_batches"],
files_per_batch=batch_info["files_per_batch"],
concat_batches_start=batch_info["concat_batches_start"],
concat_batches_end=batch_info["concat_batches_end"])
# now we have a folder structure with lots of files with batch numbers
print("ALL BATCHES ANALYSIS")
node_count, batch_list = stats.batch_with_highest_node_count(data_dir=data_paths,
batch_info=batch_info,
measurement_types=measurement_types)
print("max number of nodes: ", node_count)
print("batches with max node count: ", batch_list)
print("number of batches with that node count: ", len(batch_list))
data_path_list = []
data_files = []
sorted_dict = {}
node_list = []
# here a dictionary is generated that holds a list of nodes per batch (batch:[node_ids])
for batch in range(batch_info["number_of_batches"]):
node_list = stats.nodes_per_batch(data_paths, batch, measurement_types)
sorted_dict[batch] = node_list
# a list of all files is created
for data_type in measurement_types:
path = data_paths[data_type]
for file in os.listdir(data_paths[data_type]):
data_files.append(os.path.join(path, file))
# some Data Frames and Labels for saving results nicely
result_data_frame = pandas.DataFrame()
batch_data_results = pandas.DataFrame()
index = ['Battery - PLG',
'Battery - QLG',
'Battery - RC',
'Solar - PLG',
'Load - PLG',
'Load - QLG']
columns = pandas.MultiIndex.from_product([['Samples', 'Duplicates'], index],
names=['Type', 'Measurement'])
# iterate through batches
for batch in range(batch_info["number_of_batches"]):
batch_data = pandas.DataFrame()
# iterate through nodes
result_data_frame = pandas.DataFrame()
for node in sorted_dict[batch]:
node_data = pandas.DataFrame()
search = str(node) + "_" + str(batch) + ".npy"
batch_node_subset = [val for i, val in enumerate(data_files) if val.endswith(search)]
# build a data frame with all measurement data
first_run = True
for path in batch_node_subset:
tmp_data_frame = pandas.read_pickle(path)
if first_run is True:
node_data = pandas.DataFrame(tmp_data_frame)
first_run = False
else:
node_data = pandas.concat([node_data, tmp_data_frame], axis=1)
# get the node ID
node_df = pandas.DataFrame(pandas.Series(node))
node_df.columns = ["node"]
# count samples and convert to data frame
samples = pandas.Series(stats.count_samples(node_data))
samples = pandas.DataFrame(samples).transpose()
# count duplicates and convert to data frame
duplicates = pandas.Series(stats.count_duplictaes(node_data))
duplicates = pandas.DataFrame(duplicates).transpose()
# concat and rename nicely
samples_dupli = pandas.concat([samples, duplicates], axis=1)
from pandas import Series, DataFrame
import pandas as pd
from collections import OrderedDict
from .read.readmdb import ReadMdb
class MapTables:
"""
Collection of tables with data related to vegetation map polygons
Methods
-------
get_vegtype
Return vegetation type for each mapped element.
get_mapspecies
Return species data attached to mapped elements.
get_abiotiek
Return environmental field observatons attached to mapped
elements.
get_pointspecies
Return point locations for mapped plant species.
get_year
Return year of mapping, returns 0000 if no dates are present.
Classmethods
------------
from_mdb
Create MapTables object from Microsoft Access mdb filepath.
Notes
-----
A mapped element can be a polygon or a line. Spatial data for these
elements are stored in shapefiles and linked to the table data by
the attribute ElmID.
"""
_DS_tablenames = ['vegetatietype','sbbtype']
_mapping_colnames = OrderedDict({
'Element' : {
'intern_id' : 'locatie_id',
},
'KarteringVegetatietype' : {
'locatie': 'locatie_id',
'vegetatietype': 'vegtype_code',
'bedekking':'vegtype_bedekkingcode',
'bedekking_num':'vegtype_bedekkingnum',
},
'VegetatieType' : {
'typenummer':'vegtype_nr',
'code':'vegtype_code',
'gemeenschap':'vegtype_naam',
'vorm':'vegtype_vorm',
'sbbtype':'sbbcat_id',
'sbbtype2':'sbbcat2_id',
'opmerking':'vegtype_note',
},
'SbbType': {
'cata_id':'sbbcat_id',
'versie':'sbbcat_versie',
'code':'sbbcat_code',
'klassenaamned':'sbbcat_klassenaam',
'verbrgnaamned':'sbbcat_kortenaam',
'asscocrgnaamned':'sbbcat_assrgnaam',
'subassocnaamned':'sbbcat_subassnaam',
'landtypened':'sbbcat_nednaam',
'landtypewet':'sbbcat_wetnaam',
'vervallen':'sbbcat_vervallen',
'vervangbaarheid':'sbbcat_vervangbaarheid',
},
'KarteringSoort': {
'locatie':'locatie_id',
'soortcode':'krtsrt_srtcode',
'bedekking':'krtsrt_bedcode',
'aantalsklasse':'krtsrt_aantalsklasse',
'bedekking_num':'krtsrt_bednum',
},
'CbsSoort':{
'soortnr':'cbs_srtcode',
'floron':'cbs_floron',
'wetenschap':'cbs_srtwet',
'nederlands':'cbs_srtned',
'zeldzaamheidsklasse':'cbs_zeldzaamheid',
'trendklasse':'cbs_trend',
'rl2000':'cbs_rl2000',
'rl2000kort':'cbs_rl2000kort',
},
'PuntLocatieSoort': {
'id':'pntid',
'loctype':'pntloctype',
'x_coord':'xcr',
'y_coord':'ycr',
'groep':'srtgroep',
'nummer':'srtnr',
'naam':'srtnednaam',
'wetens':'srtwetnaam',
'sbb_kl':'srtsbbkl',
'tansley':'srttansley',
'datum':'srtdatum',
'waarn':'srtwrnmr',
'opm':'srtopm',
},
'KarteringAbiotiek': {
'locatie':'locatie_id',
'abiotiek':'abio_code',
},
'Abiotiek': {
'code':'abio_code',
'omschrijving':'abio_wrn',
},
})
def __init__(self,tables=None,mdbpath=None):
"""MapTables constructor.
Parameters
----------
tables : OrderedDict
Dictionary of tables from mdb file
mdbpath : string
Filepath to mdb sourcefile (for userwarnings).
"""
self._tbldict = tables
if mdbpath is None:
mdbpath = ''
self._filepath = mdbpath
# numeric to string type
self._tbldict['Element'] = self._tbldict['Element'].astype(
{'locatie_id':str,'elmid':str,})
self._tbldict['KarteringVegetatietype']=self._tbldict['KarteringVegetatietype'].astype(
{'locatie_id':str,})
self._tbldict['VegetatieType'] = self._tbldict['VegetatieType'].astype(
{'sbbcat_id': str, 'sbbcat2_id': str,})
self._tbldict['SbbType'] = self._tbldict['SbbType'].astype(
{"sbbcat_id": str, "sbbcat_versie": str, "sbbcat_vervangbaarheid": str,})
self._tbldict['KarteringSoort']=self._tbldict['KarteringSoort'].astype(
{'locatie_id':str,'krtsrt_srtcode':str,})
self._tbldict['CbsSoort']=self._tbldict['CbsSoort'].astype(
{'cbs_srtcode':str,})
self._tbldict['KarteringAbiotiek']=self._tbldict['KarteringAbiotiek'].astype(
{'locatie_id':str,})
# change vervangbaarheid from float-like '5.0' to single-character string '5'
self._tbldict['SbbType']['sbbcat_vervangbaarheid']=self._tbldict['SbbType']['sbbcat_vervangbaarheid'].str[:1]
# convert column locatietype to lowercase
# (locatietype can be: 'v','l','V','L')
self._tbldict['Element']['locatietype'] = self._tbldict['Element']['locatietype'].str.lower()
# fix small errors that occur in just a few (or just one) mdbfiles
# smallfix01
colnames = self._tbldict['Element'].columns
if ((not 'sbbtype' in colnames) and ('sbbtype1' in colnames)):
self._tbldict['Element']=self._tbldict['Element'].rename(
columns={'sbbtype1':'sbbtype'})
def __repr__(self):
return f'MapTables (n={self.__len__()})'
def __len__(self):
return len(self._tbldict['Element'])
@classmethod
def from_mdb(cls,filepath):
"""
Create MapTables object from Microsoft Access mdb filepath.
Parameters
----------
filepath : str
valid filepath to Microsoft Access mdb file
"""
if not isinstance(filepath,str):
fptype = type(filepath)
raise ValueError (f'Parameter filepath must be type "str" '
f'not type {fptype}.')
# open mdb file and check format is Digitale Standaard
mdb = ReadMdb(filepath)
if not mdb.all_tables():
raise Exception(f'{mdb._filepath} is not a valid Digitale Standaard database.')
#if any([item in mdb.tablenames() for item in cls._DS_tablenames]):
# raise Exception(f'{mdb} is not a valid Digitale Standaard database.')
# all mdb tables to dict
mdbtables = mdb.all_tables()
maptables = {}
for tblname in mdbtables.keys():
mdbtbl = mdbtables[tblname]
mdbtbl.columns = map(str.lower,mdbtbl.columns)
if tblname in cls._mapping_colnames.keys():
mdbtbl = mdbtbl.rename(columns=cls._mapping_colnames[tblname])
maptables[tblname] = mdbtbl
return cls(tables=maptables,mdbpath=filepath)
def get_vegtype(self):
"""
Return vegetation type for each mapped element.
Notes
-----
A mapped element can have multiple vegetation types. Therefore
the table returned can have multiple rows with the same value
for the map polygon id elmid.
The fields bedekking and bedekking_num show the cover a
vegetation type has within a map polygon.
"""
elmcolnames = ['locatie_id', 'elmid', 'locatietype', 'datum']
element = self._tbldict['Element'][elmcolnames]
isvlak = element['locatietype']=='v'
element = element[isvlak].copy()
vegloc = self._tbldict['KarteringVegetatietype']
element = pd.merge(element,vegloc,left_on='locatie_id',
right_on='locatie_id',suffixes=(None,'_vegloc'),
validate='one_to_many')
vegtype = self._tbldict['VegetatieType']
element = pd.merge(element,vegtype,left_on='vegtype_code',
right_on='vegtype_code',suffixes=(None,'_vegtype'),
validate='many_to_one')
sbbtype = self._tbldict['SbbType']
element = pd.merge(element,sbbtype,left_on='sbbcat_id',
right_on='sbbcat_id',suffixes=(None,'sbbtype'),
validate='many_to_one')
element['datum'] = element['datum'].apply(lambda x: x.strftime(
'%d%m%Y') if not pd.isna(x) else '')
colnames = ['elmid','datum','locatietype','vegtype_code',
'vegtype_naam','vegtype_vorm','vegtype_bedekkingcode',
'vegtype_bedekkingnum',
'sbbcat_code', 'sbbcat_wetnaam','sbbcat_nednaam',
'sbbcat_kortenaam','sbbcat_vervangbaarheid']
element = element[colnames]
return element.copy()
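# Illustrative usage sketch (not executed), assuming a Digitale Standaard
# .mdb export is available (the file name is hypothetical):
#   mt = MapTables.from_mdb('kartering.mdb')
#   veg = mt.get_vegtype()
#   veg[['elmid', 'vegtype_code', 'sbbcat_code']].head()
# One row is returned per (polygon, vegetation type) combination, so elmid
# values may repeat.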
def get_pointspecies(self):
"""Return table of point locations for mapped plant species"""
pntsrt = self._tbldict['PuntLocatieSoort'].copy()
pntsrt['srtdatum'] = pntsrt['srtdatum'].apply(
lambda x: x.strftime('%d%m%Y') if not pd.isna(x) else '')
return pntsrt
def get_year(self):
"""Return year of mapping, returns 0000 if no dates are present"""
dates = pd.to_datetime(self._tbldict['Element']['datum'],errors='coerce')
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
date: 2021/9/28 16:02
desc: Eastmoney (东方财富网) data center - featured data - institutional research surveys
http://data.eastmoney.com/jgdy/
Institutional survey statistics: http://data.eastmoney.com/jgdy/tj.html
Institutional survey details: http://data.eastmoney.com/jgdy/xx.html
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_em_jgdy_tj(start_date: str = "20180928") -> pd.DataFrame:
"""
Eastmoney data center - featured data - institutional research - survey statistics
http://data.eastmoney.com/jgdy/tj.html
:param start_date: start date, e.g. "20180928"
:type start_date: str
:return: institutional survey statistics
:rtype: pandas.DataFrame
"""
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'NOTICE_DATE,SUM,RECEIVE_START_DATE,SECURITY_CODE',
'sortTypes': '-1,-1,-1,1',
'pageSize': '500',
'pageNumber': '1',
'reportName': 'RPT_ORG_SURVEYNEW',
'columns': 'ALL',
'quoteColumns': 'f2~01~SECURITY_CODE~CLOSE_PRICE,f3~01~SECURITY_CODE~CHANGE_RATE',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(NUMBERNEW="1")(IS_SOURCE="1")(RECEIVE_START_DATE>'{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = big_df.append(temp_df)
big_df.reset_index(inplace=True)
big_df["index"] = list(range(1, len(big_df) + 1))
big_df.columns = [
"序号",
"_",
"代码",
"名称",
"_",
"公告日期",
"接待日期",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"接待地点",
"_",
"接待方式",
"_",
"接待人员",
"_",
"_",
"_",
"_",
"_",
"接待机构数量",
"_",
"_",
"_",
"_",
"_",
"_",
"涨跌幅",
"最新价",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"接待机构数量",
"接待方式",
"接待人员",
"接待地点",
"接待日期",
"公告日期",
]
]
big_df['最新价'] = pd.to_numeric(big_df['最新价'], errors="coerce")
big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'], errors="coerce")
big_df['接待机构数量'] = pd.to_numeric(big_df['接待机构数量'], errors="coerce")
big_df['接待日期'] = pd.to_datetime(big_df['接待日期']).dt.date
big_df['公告日期'] = pd.to_datetime(big_df['公告日期']).dt.date
return big_df
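# Illustrative usage sketch (not executed): fetch the survey statistics from
# a given start date and inspect the most recent announcements.
#   df = stock_em_jgdy_tj(start_date="20210901")
#   df.head()
# Columns keep the Chinese labels defined above (序号, 代码, 名称, ...) and
# the date columns are plain datetime.date objects.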
def stock_em_jgdy_detail(start_date: str = "20180928") -> pd.DataFrame:
"""
Eastmoney data center - featured data - institutional research - survey details
http://data.eastmoney.com/jgdy/xx.html
:param start_date: start date, e.g. "20180928"
:type start_date: str
:return: institutional survey details
:rtype: pandas.DataFrame
"""
url = "http://datacenter-web.eastmoney.com/api/data/v1/get"
params = {
'sortColumns': 'NOTICE_DATE,RECEIVE_START_DATE,SECURITY_CODE,NUMBERNEW',
'sortTypes': '-1,-1,1,-1',
'pageSize': '50000',
'pageNumber': '1',
'reportName': 'RPT_ORG_SURVEY',
'columns': 'ALL',
'quoteColumns': 'f2~01~SECURITY_CODE~CLOSE_PRICE,f3~01~SECURITY_CODE~CHANGE_RATE',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(IS_SOURCE="1")(RECEIVE_START_DATE>'{'-'.join([start_date[:4], start_date[4:6], start_date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
total_page = data_json['result']['pages']
big_df = pd.DataFrame()
for page in tqdm(range(1, total_page+1), leave=False):
params.update({"pageNumber": page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = big_df.append(temp_df)
big_df.reset_index(inplace=True)
big_df["index"] = list(range(1, len(big_df) + 1))
big_df.columns = [
"序号",
"_",
"代码",
"名称",
"_",
"公告日期",
"调研日期",
"_",
"_",
"_",
"调研机构",
"_",
"_",
"_",
"接待地点",
"_",
"接待方式",
"调研人员",
"接待人员",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"机构类型",
"_",
"_",
"_",
"_",
"_",
"最新价",
"涨跌幅",
]
big_df = big_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"调研机构",
"机构类型",
"调研人员",
"接待方式",
"接待人员",
"接待地点",
"调研日期",
"公告日期",
]
]
big_df['最新价'] = pd.to_numeric(big_df['最新价'], errors="coerce")
big_df['涨跌幅'] = pd.to_numeric(big_df['涨跌幅'], errors="coerce")
big_df['调研日期'] = pd.to_datetime(big_df['调研日期'])
#!python
# builtin
import os
import sys
import logging
import json
import time
import contextlib
import multiprocessing
import urllib
import csv
# external
import numpy as np
import pandas as pd
import h5py
import pyteomics.mgf
# local
from ion_networks._version import __version__ as VERSION
from ion_networks import numba_functions
GITHUB_VERSION_FILE = "https://raw.githubusercontent.com/swillems/ion_networks/master/ion_networks/_version.py"
BASE_PATH = os.path.dirname(__file__)
UPDATE_COMMAND = os.path.join(os.path.dirname(BASE_PATH), "install", "update.sh")
LIB_PATH = os.path.join(BASE_PATH, "lib")
DEFAULT_PARAMETER_PATH = os.path.join(LIB_PATH, "default_parameters")
DEFAULT_PARAMETER_FILES = {
"convert": "convert_parameters.json",
"create": "create_parameters.json",
"evidence": "evidence_parameters.json",
"interface": "interface_parameters.json",
"database": "database_parameters.json",
"annotation": "annotation_parameters.json",
"mgf": "mgf_parameters.json",
}
DATA_TYPE_FILE_EXTENSIONS = {
"DDA": ".mgf",
"SONAR": "_Apex3DIons.csv",
"HDMSE": "_Apex3DIons.csv",
"SWIMDIA": "_Apex3DIons.csv",
"DIAPASEF": "_centroids.hdf",
}
LOGGER = logging.getLogger("Ion-networks")
MAX_THREADS = 1
@contextlib.contextmanager
def open_logger(log_file_name, log_level=logging.INFO):
# TODO: Docstring
start_time = time.time()
formatter = logging.Formatter('%(asctime)s > %(message)s')
LOGGER.setLevel(log_level)
if not LOGGER.hasHandlers():
console_handler = logging.StreamHandler(stream=sys.stdout)
console_handler.setLevel(log_level)
console_handler.setFormatter(formatter)
LOGGER.addHandler(console_handler)
if log_file_name is not None:
if log_file_name == "":
log_file_name = BASE_PATH
else:
log_file_name = os.path.abspath(log_file_name)
if os.path.isdir(log_file_name):
log_file_name = os.path.join(log_file_name, "log.txt")
directory = os.path.dirname(log_file_name)
if not os.path.exists(directory):
os.makedirs(directory)
file_handler = logging.FileHandler(log_file_name, mode="a")
file_handler.setLevel(log_level)
file_handler.setFormatter(formatter)
LOGGER.addHandler(file_handler)
LOGGER.info("=" * 50)
LOGGER.info(f"COMMAND: ion_networks {' '.join(sys.argv[1:])}")
LOGGER.info(f"VERSION: {VERSION}")
LOGGER.info(f"LOGFILE: {log_file_name}")
LOGGER.info("")
try:
yield LOGGER
LOGGER.info("")
LOGGER.info("Successfully finished execution")
except:
LOGGER.info("")
LOGGER.exception("Something went wrong, execution incomplete!")
finally:
LOGGER.info(f"Time taken: {time.time() - start_time}")
LOGGER.info("=" * 50)
if log_file_name is not None:
LOGGER.removeHandler(file_handler)
def read_parameters_from_json_file(file_name="", default=""):
"""
Read a custom or default parameter file.
Parameters
----------
default : str
The default parameters that should be loaded. Options are:
"create"
"evidence"
"interface"
""
file_name : str
The name of a .json file that contains parameters defined by the user.
These will override the default parameters.
Returns
-------
dict
A dictionary with parameters.
"""
if default == "":
parameters = {"log_file_name": ""}
else:
default_parameter_file_name = os.path.join(
DEFAULT_PARAMETER_PATH,
DEFAULT_PARAMETER_FILES[default]
)
with open(default_parameter_file_name, "r") as in_file:
parameters = json.load(in_file)
if file_name != "":
with open(file_name, "r") as in_file:
user_defined_parameters = json.load(in_file)
parameters.update(user_defined_parameters)
# TODO: Numba expects proper floats or integers, not a mixture
# TODO: e.g. DT_error = 2.0, instead of DT_error = 2
if "threads" in parameters:
set_threads(parameters["threads"])
return parameters
def set_threads(threads):
global MAX_THREADS
max_cpu_count = multiprocessing.cpu_count()
if threads > max_cpu_count:
MAX_THREADS = max_cpu_count
else:
while threads <= 0:
threads += max_cpu_count
MAX_THREADS = threads
def get_file_names_with_extension(input_path, extension=""):
"""
Get all file names with a specific extension from a list of files and
folders.
Parameters
----------
input_path : iterable[str]
An iterable with files or folders from which all files with a specific
extension need to be selected.
extension : str
The extension of the files of interest.
Returns
-------
list
A sorted list with unique file names with the specific extension.
"""
input_files = set()
if not isinstance(extension, str):
for tmp_extension in extension:
for file_name in get_file_names_with_extension(
input_path,
tmp_extension
):
input_files.add(file_name)
else:
for current_path in input_path:
if os.path.isfile(current_path):
if current_path.endswith(extension):
input_files.add(current_path)
elif os.path.isdir(current_path):
for current_file_name in os.listdir(current_path):
if current_file_name.endswith(extension):
file_name = os.path.join(
current_path,
current_file_name
)
input_files.add(file_name)
return sorted([os.path.abspath(file_name) for file_name in input_files])
def read_data_from_file(
data_type,
file_name,
log_transform_intensity=True,
):
"""
Convert an [input_file.*] file to a pd.DataFrame with as columns the
dimensions associated with the data type.
Parameters
----------
data_type : str
The data type of the [input_file.*] file. Options are:
'DDA'
'SONAR'
'HDMSE'
'SWIMDIA'
'DIAPASEF'
file_name : str
The file name containing centroided ions.
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
if data_type == "DDA":
read_function = read_data_from_mgf_file
elif data_type == "SONAR":
read_function = read_data_from_sonar_file
elif data_type == "HDMSE":
read_function = read_data_from_hdmse_file
elif data_type == "SWIMDIA":
read_function = read_data_from_swimdia_file
elif data_type == "DIAPASEF":
read_function = read_data_from_diapasef_file
data = read_function(
file_name,
log_transform_intensity=log_transform_intensity,
)
return data
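# Illustrative usage sketch (not executed): read a DDA run into the
# four-dimensional ion table used downstream (the file name is hypothetical):
#   ions = read_data_from_file('DDA', 'sample.mgf')
#   list(ions.columns)
#   # ['FRAGMENT_MZ', 'PRECURSOR_RT', 'FRAGMENT_LOGINT', 'PRECURSOR_MZ']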
def read_data_from_mgf_file(
file_name,
log_transform_intensity=True,
):
"""
Convert an [mgf_input.mgf] file to a pd.DataFrame with as columns the
PRECURSOR_RT, PRECURSOR_MZ, FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
Parameters
----------
file_name : str
The file name of the DDA .mgf file (generated with ms-convert).
log_transform_intensity : bool
Transform the intensities to logarithmic values.
Returns
-------
pd.DataFrame
A pd.DataFrame with as columns the PRECURSOR_RT, PRECURSOR_MZ,
FRAGMENT_MZ and FRAGMENT_LOGINT dimensions.
"""
LOGGER.info(f"Reading mgf file {file_name}")
mz1s = []
mz2s = []
rts = []
ints = []
for spectrum in pyteomics.mgf.read(file_name):
peak_count = len(spectrum["intensity array"])
ints.append(spectrum["intensity array"])
mz2s.append(spectrum["m/z array"])
rts.append(
np.repeat(spectrum["params"]["rtinseconds"] / 60, peak_count)
)
mz1s.append(np.repeat(spectrum["params"]["pepmass"][0], peak_count))
mz1s = np.concatenate(mz1s)
mz2s = np.concatenate(mz2s)
rts = np.concatenate(rts)
ints = np.concatenate(ints)
if log_transform_intensity:
ints = np.log2(ints)
dimensions = [
"FRAGMENT_MZ",
"PRECURSOR_RT",
"FRAGMENT_LOGINT",
"PRECURSOR_MZ"
]
data = np.stack([mz2s, rts, ints, mz1s]).T
return pd.DataFrame(data, columns=dimensions)
# AUTOGENERATED! DO NOT EDIT! File to edit: 22_statistics.ipynb (unless otherwise specified).
__all__ = ['descriptive_table', 'ks_test', 'cohens_d', 'spearmans_r', 'calculate_walker_matrix', 'label_overlap_table',
'add_tables', 'color_matrix']
# Cell
import pandas as pd, numpy as np
from scipy import stats
def descriptive_table(measures_table, loud=False, extended=False):
"Creates the first table found in LaBrie et al's 2008 paper, which presents descriptive statistics for each of the behavioural measures they calculated."
# first pull all of the data out of the dictionary for more readable use
# later on
measure_names = list(measures_table.columns)[1:]
means = []
stds = []
medians = []
iqrs = []  # local list; avoid attaching state to the scipy.stats module
for measure in measure_names:
means.append(measures_table[measure].mean())
stds.append(measures_table[measure].std())
medians.append(measures_table[measure].median())
iqrs.append(stats.iqr(measures_table[measure].values))
if loud:
print("calculating descriptive statistics for LaBrie measures")
descriptive_df = pd.DataFrame(columns=["measure", "mean", "std", "median"])
descriptive_df["measure"] = measure_names
descriptive_df["mean"] = means
descriptive_df["std"] = stds
descriptive_df["median"] = medians
if extended:
descriptive_df["iqr"] = stats.iqrs
descriptive_df.set_index("measure", inplace=True)
descriptive_df = descriptive_df.rename_axis(None)
return descriptive_df
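# Illustrative usage sketch (not executed), assuming a measures table whose
# first column identifies the player and whose remaining columns are numeric
# behavioural measures:
#   measures = pd.DataFrame({'player_id': [1, 2, 3],
#                            'total_wagered': [10.0, 25.0, 40.0],
#                            'num_bets': [5, 12, 7]})
#   descriptive_table(measures, extended=True)
# returns one row per measure with its mean, std, median (and iqr when
# extended=True).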
# Cell
def ks_test(measures_table):
"Performs a one sample Kolmogorov-Smirnov test. This approximately indicates whether or not a collection of calculated behavioural measures are normally distributed."
measure_names = list(measures_table.columns)[1:]
scores = []
pvals = []
for measure in measure_names:
result = stats.kstest(measures_table[measure], "norm")
scores.append(result[0])
pvals.append(result[1])
ks_table = pd.DataFrame(columns=["Measure", "K-S Score", "p"])
ks_table["Measure"] = measure_names
ks_table["K-S Score"] = scores
ks_table["p"] = pvals
ks_table.set_index("Measure", inplace=True)
ks_table.rename_axis(None, inplace=True)
return ks_table
# Cell
import math
def cohens_d(measures_table, label):
"Calculates Cohen's d value between the behavioural measures of two groups of players. Groups are distinguished using a label column which is either 1 (in group) or 0 (not in group). For example, the column 'in_top5' may represent whether or not a player is in the top 5 % of players by total amount wagered, and would be 1 or 0 for the top 5 and remaining 95 percent respectively."
control_group = measures_table[measures_table[label] == 0]
experimental_group = measures_table[measures_table[label] == 1]
measure_names = list(measures_table.columns)[1:]
# remove the label column (no point doing cohens d on it)
measure_names.remove(label)
d_results = []
# do cohens d for each measure
for measure in measure_names:
control_measure = control_group[measure]
experimental_measure = experimental_group[measure]
control_mean = control_measure.mean()
experimental_mean = experimental_measure.mean()
control_sd = control_measure.std()
experimental_sd = experimental_measure.std()
control_n = len(control_measure)
experimental_n = len(experimental_measure)
top_line = ((control_n - 1) * control_sd ** 2) + (
(experimental_n - 1) * experimental_sd ** 2
)
pooled_sd = math.sqrt(top_line / (control_n + experimental_n - 2))
d = (control_mean - experimental_mean) / pooled_sd
d_results.append(d)
# make a nice dataframe to present the results
d_table = pd.DataFrame(columns=["Measure", "Cohen's d"])
d_table["Measure"] = measure_names
d_table["Cohen's d"] = d_results
d_table.set_index("Measure", inplace=True)
d_table.rename_axis(None, inplace=True)
return d_table
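# Illustrative usage sketch (not executed): the label column must be 0/1,
# e.g. a hypothetical 'in_top5' flag added beforehand:
#   measures['in_top5'] = (measures['total_wagered']
#                          >= measures['total_wagered'].quantile(0.95)).astype(int)
#   cohens_d(measures, 'in_top5')
# returns one pooled-standard-deviation effect size per behavioural measure.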
# Cell
def spearmans_r(measures_table, loud=False):
"Calculates the coefficients (nonparametric Spearman's r) between a collection of behavioural measures. The upper-right diagonal of the resulting matrix is discarded (symmetric)."
measure_names = list(measures_table.columns)[1:]
data = []
for column in measure_names:
data.append(measures_table[column].values)
labels = measure_names
coefs = []
p_values = []
for toprow in data:
for siderow in data:
coef, p = stats.spearmanr(toprow, siderow)
coefs.append(coef)
p_values.append(p)
coefs = np.array(coefs)
# reshape as matrix
coef_as_matrix = coefs.reshape(len(data), len(data))
# cut off top-diagonal elements
coef_as_matrix = np.tril(coef_as_matrix, -1)
p_values = np.array(p_values)
p_as_matrix = np.array(p_values).reshape(len(data), len(data))
p_as_matrix = np.tril(p_as_matrix, -1)
coef_df = pd.DataFrame(coef_as_matrix, columns=labels, index=labels)
p_df = pd.DataFrame(p_as_matrix, columns=labels, index=labels)
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes und FixedLimits sind standardmaessig Falsch; RefPerc ist standardmaessig Wahr
>>> # die Belegung von MCategory gemaess FixedLimitsHigh/Low erfolgt immer ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits wird automatisch auf Wahr gesetzt wenn 3Classes Wahr ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
import math
import sys
from copy import deepcopy
from itertools import chain
import scipy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- Parameter Allgemein
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftsize: the Difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
windowSize must be an even number
for savgol_filter, windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]  # integer division: .iloc needs an int position
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
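# Illustrative usage sketch (not executed), assuming df has a DatetimeIndex
# and a numeric column 'p' sampled about once per second:
#   dfDeriv = getDerivative(df, 'p', shiftSize=1, windowSize=60)
#   dfDeriv[['dValueDt', 'dValueDtFiltered']].plot()
# With savgol_polyorder=None a rolling mean over windowSize samples is used;
# otherwise a Savitzky-Golay filter with window_length windowSize-1 is applied.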
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
returns the maximum pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- Funktionen Allgemein
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
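# Illustrative usage sketch (not part of the original module; the times are made up):
# with no overlap, praefix or postfix, three adjacent 12-minute sections are generated
# between 00:00 and 00:36.
def _genTimespansDemo():
    import pandas as pd
    xlims = genTimespans(
        timeStart=pd.Timestamp('2021-03-19 00:00:00')
        ,timeEnd=pd.Timestamp('2021-03-19 00:36:00')
        ,timeSpan=pd.Timedelta('12 Minutes')
    )
    # expected: [(00:00, 00:12), (00:12, 00:24), (00:24, 00:36)]
    return xlims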
def gen2Timespans(
timeStart # start of a "process"
,timeEnd # end of a "process"
,timeSpan=pd.Timedelta('12 Minutes')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
,roundStr=None # i.e. '5min': timeStart.round(roundStr) and timeEnd likewise
):
"""
generates 2 time ranges of equal length
1 around timeStart
1 around timeEnd
"""
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
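# Illustrative usage sketch (not part of the original module; the times are made up):
# one window of length timeSpan starting at the process start and one ending at the process end.
def _gen2TimespansDemo():
    import pandas as pd
    xlims = gen2Timespans(
        timeStart=pd.Timestamp('2021-03-19 01:00:00')
        ,timeEnd=pd.Timestamp('2021-03-19 02:00:00')
        ,timeSpan=pd.Timedelta('12 Minutes')
    )
    # expected: [(01:00, 01:12), (01:48, 02:00)]
    return xlims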
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
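# Illustrative usage sketch (not part of the original module; the times are made up):
# two disjoint intervals of 5 and 2 minutes add up to a total of 7 minutes.
def _fTotalTimeFromPairsDemo():
    import pandas as pd
    t = pd.Timestamp('2021-03-19 01:02:00')
    pairs = [(t, t + pd.Timedelta('5 minutes')),
             (t + pd.Timedelta('10 minutes'), t + pd.Timedelta('12 minutes'))]
    return fTotalTimeFromPairs(pairs, denominator=pd.Timedelta('1 minute'))  # expected: 7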
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# pairwise over all rows
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# if 1 is not x and 2 is x: tEin=t2 ("switches on")
if not row1Value and row2Value:
tEin=i2
# if 1 is x and 2 is not x: tAus=t2 ("switches off")
elif row1Value and not row2Value:
if tEin != None:
# store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # otherwise: the condition is now off and was never on
# the condition can only switch on in the first case
# if 1 is x and 2 is x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# in the first value pair the range is already on
tEin=i1
# last pair
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
# if fct is given:
# find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; True solitaires are not lost but are returned as a pair (t,t)
# True solitaires are ONLY included if s contains just 1 value and this value is True; the single returned pair then contains the solitaire timestamp for both times
# tdAllowed can be specified
# afterwards the result is merged into time ranges that are no more than tdAllowed apart; these time ranges are then returned
# if fct is None:
# tdAllowed must be specified
# split into time ranges that are no more than the threshold tdAllowed apart; these time ranges are returned
# in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times for this
# because no time range contained in s shall be lost
# if s contains just 1 value, 1 time pair with the same timestamp for both times is returned, if the value is not Null
# returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 pair with identical times if the single element is True
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 pair with identical times if the single element is not None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# pairwise over all times
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# if 1 is not x and 2 is x: tEin=t2 ("switches on")
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# at the first pair it "switches on"
pass
# if 1 is x and 2 is not x: tAus=t2 ("switches off")
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# store the pair
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singular event
# pair with identical times
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # switches off without having been on
if idx > 0: # Info
pass
else:
# in the first pair
pass
# if 1 is x and 2 is x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# in the first value pair the range is already on
tEin=i1
# handle the last pair
# stays on at the end of the series: store the pair
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# handle tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# pairwise over all times
# start a new pair
anzInPair=1 # number of times in the current time span
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # time between 2 times > threshold: the time span is closed
if tEin==None:
# the first pair is already more than the threshold apart
# closing the time span is ignored, because otherwise the time span would only contain 1 value
# the current time span starts at the 1st value and runs across the threshold
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# close the time span
tPair=(tEin,i1)
tPairs.append(tPair)
# start a new time span
tEin=i2
anzInPair=1
else:
# closing the time span is ignored, because otherwise the time span would only contain 1 value
anzInPair=2
else: # time span allowed, continue ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# handle the last time pair
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# a single last value would remain, so extend the last time span ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # merge the following pair into the previous pair
tPairs.remove(tp2) # delete the following pair
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # recursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
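# Illustrative usage sketch (not part of the original module; the times are made up):
# two pairs whose gap is <= tdAllowed are merged into a single pair. Note that the
# function modifies the passed list in place and also returns it.
def _fCombineSubsequenttPairsDemo():
    import pandas as pd
    t = pd.Timestamp('2021-03-19 01:02:00')
    tPairs = [(t, t + pd.Timedelta('1 second')),
              (t + pd.Timedelta('2 seconds'), t + pd.Timedelta('3 seconds'))]
    return fCombineSubsequenttPairs(tPairs, tdAllowed=pd.Timedelta('1 second'))
    # expected: [(01:02:00, 01:02:03)]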
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameter und Funktionen LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# the first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# the last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated are: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command or state)
# the commands and states (the values of colRegExEventID) must be defined below in order to define the marker (of the command or the state)
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value for commands (==> eventCCmds)
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
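# Illustrative example (not part of the original module) for the name derivation above:
# the DIVPipelineName is built from the 1st and 3rd underscore-separated parts of a SEGName.
def _fDIVNameFromSEGNameDemo():
    """
    >>> _fDIVNameFromSEGNameDemo()
    '6_41'
    """
    return fDIVNameFromSEGName('6_AAD_41_OHV1')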
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
works in principle for SEG and Druck results: every result PV of a vector yields the base valid for all result PVs of that vector
i.e. the result PVs of a vector differ only in their suffix
see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df with ODI parameterization data
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # only return ergIDs for which patternPat matches
):
"""
returns a string
of IDs from dfODI separated by strSep which contain baseID (and, if pattern is True, match patternPat)
baseID (and group(0) of patternPat if pattern is True) are removed from the IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
all segments with path data (edge sequences), with edge and node data as well as parameterization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; key in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- read in the model
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- determine segments
# --- via the model
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- only via LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- combine
SEGsDefines=pd.concat([SEGsDefinesPerRICHT, | pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG']) | pandas.DataFrame |
#OUTDATED
#Splices exons from genes together to construct a region approximating a gene
#Tracks process time
#pc_gene_eo.csv generated by awk filtering of mouse gene annoatation for protein_coding, exon_number, chr, with # lines removed
#pc_ids_eo.csv generated by awk filtering of mouse gene annoatation for protein_coding, exon_number, chr, with # lines removed for column $9
#uq_pc_ids_eo.csv generated by awk filtering of pc_ids_eo.csv for unique lines
#Exports constructed_genes.csv
import time
import pandas as pd
def duplicates(list, item):
"""Returns index locations of item in list"""
return [i for i, x in enumerate(list) if x == item]
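# Illustrative usage sketch (not part of the original script): duplicates() returns every
# index position at which item occurs, e.g. duplicates(['a', 'b', 'a', 'c'], 'a') == [0, 2].
def _duplicates_demo():
    assert duplicates(['a', 'b', 'a', 'c'], 'a') == [0, 2]
    return duplicates(['a', 'b', 'a', 'c'], 'a')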
panda_import_time = time.process_time()
print("pandas imported in " + str(panda_import_time))
#load gene data
gene_data_df = pd.read_csv("data/mm10_data/pc_genes_eo.csv", header=None, index_col=False)
gene_data_load_time = time.process_time() - panda_import_time
print("gene data loaded in " + str(gene_data_load_time))
#pc_ids_eo doesn't have constant row size, so pandas cannot be used to load dataframe
printed = False
ids = []
path = "data/mm10_data/pc_ids_eo.csv"
with open(path) as gene_ids:
ids_load_time = time.process_time() - gene_data_load_time
print("gene ids loaded in " + str(ids_load_time))
#split file into lines
lis = [line.split() for line in gene_ids]
for line in lis:
for id in line:
#split by comma, retain gene ids
id_values = id.split(',')
for value in id_values:
if 'gene_id' in value:
ids.append(value)
id_processed_time = time.process_time() - ids_load_time
print('ids processed in ' + str(id_processed_time))
#load unique ids -- This is faster than searching list of seen ids
uq_ids = []
path = "data/mm10_data/uq_pc_ids_eo.csv"
uq_pc_ids_eo_df = pd.read_csv(path, header=None, index_col=False)
uq_id_load_time = time.process_time() - id_processed_time
print('unique ids loaded in ' + str(uq_id_load_time))
for row in uq_pc_ids_eo_df[0]:
uq_ids.append(row)
chr = []
start = []
end = []
exons = []
for row in gene_data_df[0]:
chr.append(row)
for row in gene_data_df[1]:
start.append(row)
for row in gene_data_df[2]:
end.append(row)
basic_exon_processed_time = time.process_time() - id_processed_time
print('basic exons processed into feature lists in ' + str(basic_exon_processed_time))
for x in range(len(chr)):
exons.append([chr[x], start[x], end[x]])
basic_data_processed_time = time.process_time() - basic_exon_processed_time
print('basic exon data processed into gene lists ' + str(basic_exon_processed_time))
exons_in_gene = {}
#iterate through unique ids
for id in uq_ids:
exons_for_id = []
#find where ids and accompanying data are located by index
indexes = duplicates(ids, id)
for index in indexes:
exons_for_id.append(exons[index])
if printed == False:
print("Value added to genes_for_id list")
print(exons[index])
printed = True
exons_in_gene[id] = exons_for_id
exon_collection_time = time.process_time() - basic_data_processed_time
print("exons collected by id in " + str(exon_collection_time))
print("Number of exons: " + str(len(exons_in_gene)))
printed = False
total = 0
constructed_genes = []
#Iterate through gene ids, construct gene from left most exon and right most exon
for gene_id in exons_in_gene.keys():
total_exon_length = 0
#list of exon features
exon_starts = []
exon_ends = []
#constructed gene values
exon_start = 0
exon_end = 0
total += len(exons_in_gene[gene_id])
for exon in exons_in_gene[gene_id]:
total_exon_length += (int(exon[2]) - int(exon[1]))
exon_starts.append(exon[1])
exon_ends.append(exon[2])
#leftmost feature
exon_start = min(exon_starts)
#rightmost feature
exon_end = max(exon_ends)
constructed_genes.append([gene_id, exon[0], exon_start, exon_end, total_exon_length])
avg = total / len(exons_in_gene.keys())
print("Average number of exons per gene: " + str(avg))
constructed_genes_df = | pd.DataFrame(constructed_genes) | pandas.DataFrame |
from django.shortcuts import render_to_response
from django.utils.cache import patch_response_headers
from django.http import JsonResponse
from core.views import initRequest, login_customrequired
from core.utils import is_json_request
from core.iDDS.useconstants import SubstitleValue
from core.iDDS.rawsqlquery import getRequests, getTransforms, getWorkFlowProgressItemized
from core.iDDS.algorithms import generate_requests_summary, parse_request
from core.libs.exlib import lower_dicts_in_list
from core.libs.DateEncoder import DateEncoder
import pandas as pd
CACHE_TIMEOUT = 20
OI_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
subtitleValue = SubstitleValue()
def prepare_requests_summary(workflows):
summary = {'status': {}, 'username': {}}
"""
completion
age
"""
for workflow in workflows:
summary['status'][workflow['r_status']] = summary['status'].get(workflow['r_status'], 0) + 1
if workflow['username'] == '':
workflow['username'] = "Not set"
summary['username'][workflow['username']] = summary['username'].get(workflow['username'], 0) + 1
return summary
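# Illustrative sketch (not part of the original view module; the workflow dicts are made up):
# prepare_requests_summary() counts workflows per status and per user, mapping an empty
# username to "Not set".
def _demo_prepare_requests_summary():
    workflows = [
        {'r_status': 'Finished', 'username': 'alice'},
        {'r_status': 'Finished', 'username': ''},
    ]
    return prepare_requests_summary(workflows)
    # expected: {'status': {'Finished': 2}, 'username': {'alice': 1, 'Not set': 1}}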
def get_workflow_progress_data(request_params, **kwargs):
workflows_items = getWorkFlowProgressItemized(request_params, **kwargs)
workflows_items = pd.DataFrame(workflows_items)
workflows_semi_grouped = []
if not workflows_items.empty:
workflows_items.USERNAME.fillna(value='', inplace=True)
workflows_pd = workflows_items.astype({"WORKLOAD_ID":str}).astype({"R_CREATED_AT":str}).groupby(['REQUEST_ID', 'R_STATUS', 'P_STATUS', 'R_NAME', 'USERNAME']).agg(
PROCESSING_FILES_SUM=pd.NamedAgg(column="PROCESSING_FILES", aggfunc="sum"),
PROCESSED_FILES_SUM=pd.NamedAgg(column="PROCESSED_FILES", aggfunc="sum"),
TOTAL_FILES=pd.NamedAgg(column="TOTAL_FILES", aggfunc="sum"),
P_STATUS_COUNT=pd.NamedAgg(column="P_STATUS", aggfunc="count"),
R_CREATED_AT= | pd.NamedAgg(column="R_CREATED_AT", aggfunc="first") | pandas.NamedAgg |
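# Self-contained sketch (illustrative only, not part of the view above) of the named-aggregation
# pattern used in get_workflow_progress_data(): pd.NamedAgg pairs a source column with an
# aggregation function and names the resulting output column.
def _demo_named_agg():
    import pandas as pd
    df = pd.DataFrame({"REQUEST_ID": [1, 1, 2], "PROCESSED_FILES": [3, 4, 5]})
    return df.groupby("REQUEST_ID").agg(
        PROCESSED_FILES_SUM=pd.NamedAgg(column="PROCESSED_FILES", aggfunc="sum"),
        PROCESSED_FILES_MAX=pd.NamedAgg(column="PROCESSED_FILES", aggfunc="max"),
    )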
"""A mock experiment for use in testing"""
from typing import List, Tuple
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import pandas as pd
from neural_clbf.experiments import Experiment
from neural_clbf.controllers import Controller
class MockExperiment(Experiment):
"""A mock experiment for use during testing"""
def run(self, controller_under_test: Controller) -> pd.DataFrame:
"""
Run the experiment, likely by evaluating the controller, but the experiment
has freedom to call other functions of the controller as necessary (if these
functions are not supported by all controllers, then experiments will be
responsible for checking compatibility with the provided controller)
args:
controller_under_test: the controller with which to run the experiment
returns:
a pandas DataFrame containing the results of the experiment, in tidy data
format (i.e. each row should correspond to a single observation from the
experiment).
"""
# Return an empty dataframe
results_df = | pd.DataFrame({"t": [0, 1, 2, 3], "x": [0, 1, 2, 3]}) | pandas.DataFrame |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import zipfile
import os
import geopy.distance
import random
import pandas as pd
import numpy as np
import csv
from enum import Enum
from yaml import safe_load
from maro.cli.data_pipeline.utils import download_file, StaticParameter
from maro.utils.logger import CliLogger
from maro.cli.data_pipeline.base import DataPipeline, DataTopology
logger = CliLogger(name=__name__)
class CitiBikePipeline(DataPipeline):
_download_file_name = "trips.zip"
_station_info_file_name = "full_station.json"
_clean_file_name = "trips.csv"
_build_file_name = "trips.bin"
_station_meta_file_name = "station_meta.csv"
_distance_file_name = "distance_adj.csv"
_meta_file_name = "trips.yml"
def __init__(self, topology: str, source: str, station_info: str, is_temp: bool = False):
"""
Generate citi_bike data bin and other necessary files for the specified topology from specified source.
They will be generated in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file and other necessary files
/source
/_download original data files
/_clean cleaned data files
/temp download temp files
Args:
topology(str): topology name of the data files
source(str): source url of original data file
station_info(str): source url of station info file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, source, is_temp)
self._station_info = station_info
self._station_info_file = os.path.join(self._download_folder, self._station_info_file_name)
self._distance_file = os.path.join(self._build_folder, self._distance_file_name)
self._station_meta_file = os.path.join(self._build_folder, self._station_meta_file_name)
self._common_data = {}
def download(self, is_force: bool = False):
"""download the zip file"""
super().download(is_force)
self._new_file_list.append(self._station_info_file)
if (not is_force) and os.path.exists(self._station_info_file):
logger.info_green("File already exists, skipping download.")
else:
logger.info_green(f"Downloading trip data from {self._station_info} to {self._station_info_file}")
download_file(source=self._station_info, destination=self._station_info_file)
def clean(self):
"""unzip the csv file and process it for building binary file"""
super().clean()
logger.info_green("Cleaning trip data")
if os.path.exists(self._download_file):
# unzip
logger.info_green("Unzip start")
with zipfile.ZipFile(self._download_file, "r") as zip_ref:
for filename in zip_ref.namelist():
# Only one csv file is expected.
if (
filename.endswith(".csv") and
(not (filename.startswith("__MACOSX") or filename.startswith(".")))
):
logger.info_green(f"Unzip {filename} from {self._download_file}")
zip_ref.extractall(self._clean_folder, [filename])
unzip_file = os.path.join(self._clean_folder, filename)
self._new_file_list.append(unzip_file)
self._preprocess(unzipped_file=unzip_file)
break
else:
logger.warning(f"Not found downloaded trip data: {self._download_file}")
def _read_common_data(self):
"""read and full init data and existed stations"""
full_stations = None
with open(self._station_info_file, mode="r", encoding="utf-8") as station_file:
# read station to station file
raw_station_data = pd.DataFrame.from_dict(pd.read_json(station_file)["data"]["stations"])
station_data = raw_station_data.rename(columns={
"lon": "station_longitude",
"lat": "station_latitude",
"region_id": "region"})
# group by station to generate station init info
full_stations = station_data[
["station_id", "capacity", "station_longitude", "station_latitude"]
].reset_index(drop=True)
# generate station id by index
full_stations["station_id"] = pd.to_numeric(full_stations["station_id"], downcast="integer")
full_stations["capacity"] = pd.to_numeric(full_stations["capacity"], downcast="integer")
full_stations["station_longitude"] = pd.to_numeric(full_stations["station_longitude"], downcast="float")
full_stations["station_latitude"] = pd.to_numeric(full_stations["station_latitude"], downcast="float")
full_stations.drop(full_stations[full_stations["capacity"] == 0].index, axis=0, inplace=True)
full_stations.dropna(
subset=["station_id", "capacity", "station_longitude", "station_latitude"], inplace=True
)
self._common_data["full_stations"] = full_stations
self._common_data["full_station_num"] = len(self._common_data["full_stations"])
self._common_data["full_dock_num"] = self._common_data["full_stations"]["capacity"].sum()
def _read_src_file(self, file: str):
"""read and return processed rows"""
ret = []
if os.path.exists(file):
# For ignoring the unimportant issues in the source file.
with open(file, "r", encoding="utf-8", errors="ignore") as fp:
ret = pd.read_csv(fp)
ret = ret[[
"tripduration", "starttime", "start station id", "end station id", "start station latitude",
"start station longitude", "end station latitude", "end station longitude", "gender", "usertype",
"bikeid"
]]
ret["tripduration"] = pd.to_numeric(
pd.to_numeric(ret["tripduration"], downcast="integer") / 60, downcast="integer"
)
ret["starttime"] = pd.to_datetime(ret["starttime"])
ret["start station id"] = pd.to_numeric(ret["start station id"], errors="coerce", downcast="integer")
ret["end station id"] = pd.to_numeric(ret["end station id"], errors="coerce", downcast="integer")
ret["start station latitude"] = pd.to_numeric(ret["start station latitude"], downcast="float")
ret["start station longitude"] = pd.to_numeric(ret["start station longitude"], downcast="float")
ret["end station latitude"] = pd.to_numeric(ret["end station latitude"], downcast="float")
ret["end station longitude"] = pd.to_numeric(ret["end station longitude"], downcast="float")
ret["bikeid"] = pd.to_numeric(ret["bikeid"], errors="coerce", downcast="integer")
ret["gender"] = pd.to_numeric(ret["gender"], errors="coerce", downcast="integer")
ret["usertype"] = ret["usertype"].apply(str).apply(
lambda x: 0 if x in ["Subscriber", "subscriber"] else 1 if x in ["Customer", "customer"] else 2
)
ret.dropna(subset=[
"start station id", "end station id", "start station latitude", "end station latitude",
"start station longitude", "end station longitude"
], inplace=True)
ret.drop(
ret[
(ret["tripduration"] <= 1) |
(ret["start station latitude"] == 0) |
(ret["start station longitude"] == 0) |
(ret["end station latitude"] == 0) |
(ret["end station longitude"] == 0)
].index,
axis=0,
inplace=True
)
ret = ret.sort_values(by="starttime", ascending=True)
return ret
def _process_src_file(self, src_data: pd.DataFrame):
used_bikes = len(src_data[["bikeid"]].drop_duplicates(subset=["bikeid"]))
trip_data = src_data[
(src_data["start station latitude"] > 40.689960) &
(src_data["start station latitude"] < 40.768334) &
(src_data["start station longitude"] > -74.019623) &
(src_data["start station longitude"] < -73.909760)
]
trip_data = trip_data[
(trip_data["end station latitude"] > 40.689960) &
(trip_data["end station latitude"] < 40.768334) &
(trip_data["end station longitude"] > -74.019623) &
(trip_data["end station longitude"] < -73.909760)
]
trip_data["start_station_id"] = trip_data["start station id"]
trip_data["end_station_id"] = trip_data["end station id"]
# get new stations
used_stations = []
used_stations.append(
trip_data[["start_station_id", "start station latitude", "start station longitude", ]].drop_duplicates(
subset=["start_station_id"]).rename(
columns={
"start_station_id": "station_id",
"start station latitude": "latitude",
"start station longitude": "longitude"
}))
used_stations.append(
trip_data[["end_station_id", "end station latitude", "end station longitude", ]].drop_duplicates(
subset=["end_station_id"]).rename(
columns={
"end_station_id": "station_id",
"end station latitude": "latitude",
"end station longitude": "longitude"
}))
in_data_station = pd.concat(used_stations, ignore_index=True).drop_duplicates(
subset=["station_id"]
).sort_values(by=["station_id"]).reset_index(drop=True)
stations_existed = pd.DataFrame(in_data_station[["station_id"]])
stations_existed["station_index"] = pd.to_numeric(stations_existed.index, downcast="integer")
# get start station id and end station id
trip_data = trip_data.join(
stations_existed.set_index("station_id"),
on="start_station_id"
).rename(columns={"station_index": "start_station_index"})
trip_data = trip_data.join(
stations_existed.set_index("station_id"),
on="end_station_id"
).rename(columns={"station_index": "end_station_index"})
trip_data = trip_data.rename(columns={"starttime": "start_time", "tripduration": "duration"})
trip_data = trip_data[
["start_time", "start_station_id", "end_station_id", "duration", "start_station_index", "end_station_index"]
]
return trip_data, used_bikes, in_data_station, stations_existed
def _process_current_topo_station_info(
self, stations_existed: pd.DataFrame, used_bikes: int, loc_ref: pd.DataFrame
):
data_station_init = stations_existed.join(
self._common_data["full_stations"][["station_id", "capacity"]].set_index("station_id"),
on="station_id"
).join(
loc_ref[["station_id", "latitude", "longitude"]].set_index("station_id"),
on="station_id"
)
# data_station_init.rename(columns={"station_id": "station_index"}, inplace=True)
avg_capacity = int(self._common_data["full_dock_num"] / self._common_data["full_station_num"])
available_bike_rate = used_bikes / self._common_data["full_dock_num"]
values = {"capacity": avg_capacity}
data_station_init.fillna(value=values, inplace=True)
data_station_init["init"] = (data_station_init["capacity"] * avalible_bike_rate).round().apply(int)
data_station_init["capacity"] = pd.to_numeric(data_station_init["capacity"],
errors="coerce", downcast="integer")
data_station_init["station_id"] = pd.to_numeric(data_station_init["station_id"],
errors="coerce", downcast="integer")
return data_station_init
def _process_distance(self, station_info: pd.DataFrame):
distance_adj = pd.DataFrame(0, index=station_info["station_index"],
columns=station_info["station_index"], dtype=np.float)
look_up_df = station_info[["latitude", "longitude"]]
return distance_adj.apply(lambda x: pd.DataFrame(x).apply(lambda y: geopy.distance.distance(
(look_up_df.at[x.name, "latitude"], look_up_df.at[x.name, "longitude"]),
(look_up_df.at[y.name, "latitude"], look_up_df.at[y.name, "longitude"])
).km, axis=1), axis=1)
def _preprocess(self, unzipped_file: str):
self._read_common_data()
logger.info_green("Reading raw data")
org_data = self._read_src_file(file=unzipped_file)
logger.info_green("Processing trip data")
trip_data, used_bikes, in_data_station, stations_existed = self._process_src_file(src_data=org_data)
self._new_file_list.append(self._clean_file)
self._new_file_list.append(self._station_meta_file)
self._new_file_list.append(self._distance_file)
with open(self._clean_file, mode="w", encoding="utf-8", newline="") as f:
trip_data.to_csv(f, index=False, header=True)
logger.info_green("Processing init data")
station_info = self._process_current_topo_station_info(
stations_existed=stations_existed, used_bikes=used_bikes, loc_ref=in_data_station
)
with open(self._station_meta_file, mode="w", encoding="utf-8", newline="") as f:
station_info.to_csv(f, index=False, header=True)
logger.info_green("Processing distance data")
station_distance = self._process_distance(station_info=station_info)
with open(self._distance_file, mode="w", encoding="utf-8", newline="") as f:
station_distance.to_csv(f, index=False, header=True)
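# Illustrative sketch (not part of the original pipeline; the coordinates are made-up example
# values): the distance matrix built in _process_distance() is filled with geopy geodesic
# distances between station coordinates, i.e. distance((lat1, lon1), (lat2, lon2)).km.
def _demo_station_distance_km():
    import geopy.distance
    station_a = (40.7644, -73.9737)  # (latitude, longitude)
    station_b = (40.7033, -74.0170)
    return geopy.distance.distance(station_a, station_b).km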
class WeatherPipeline(DataPipeline):
_last_day_temp = None # used to fill the temp for days which have no temp info
_download_file_name = "weather.csv"
_clean_file_name = "weather.csv"
_build_file_name = "KNYC_daily.bin"
_meta_file_name = "weather.yml"
class WeatherEnum(Enum):
SUNNY = 0
RAINY = 1
SNOWY = 2
SLEET = 3
def __init__(self, topology: str, source: str, is_temp: bool = False):
"""
Generate weather data bin for the specified topology from frontierweather.com.
Generated files will be generated in ~/.maro/data/citi_bike/[topology]/_build.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file
/source
/_download original data file
/_clean cleaned data file
/temp download temp file
Args:
topology(str): topology name of the data file
source(str): source url of original data file
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, source, is_temp)
self._common_data = {}
def clean(self):
super().clean()
if os.path.exists(self._download_file):
self._new_file_list.append(self._clean_file)
logger.info_green("Cleaning weather data")
self._preprocess(input_file=self._download_file, output_file=self._clean_file)
else:
logger.warning(f"Not found downloaded weather data: {self._download_file}")
def _weather(self, row: dict):
water_str = row["Precipitation Water Equiv"]
water = round(float(water_str), 2) if water_str != "" else 0.0
snow_str = row["Snowfall"]
snow = round(float(snow_str), 2) if snow_str != "" else 0.0
if snow > 0.0 and water > 0:
return WeatherPipeline.WeatherEnum.SLEET.value
elif water > 0.0:
return WeatherPipeline.WeatherEnum.RAINY.value
elif snow > 0.0:
return WeatherPipeline.WeatherEnum.SNOWY.value
else:
return WeatherPipeline.WeatherEnum.SUNNY.value
def _parse_date(self, row: dict):
dstr = row.get("Date", None)
return dstr
def _parse_row(self, row: dict):
date = self._parse_date(row=row)
wh = self._weather(row=row)
temp_str = row["Avg Temp"]
temp = round(float(temp_str), 2) if temp_str != "" and temp_str is not None else self._last_day_temp
self._last_day_temp = temp
return {"date": date, "weather": wh, "temp": temp} if date is not None else None
def _preprocess(self, input_file: str, output_file: str):
data: list = None
with open(input_file, "rt") as fp:
reader = csv.DictReader(fp)
data = [self._parse_row(row=row) for row in reader]
data = filter(None, data)
with open(output_file, "w+") as fp:
writer = csv.DictWriter(fp, ["date", "weather", "temp"])
writer.writeheader()
writer.writerows(data)
class CitiBikeTopology(DataTopology):
"""
Data topology for a predefined topology of citi_bike scenario.
Args:
topology(str): topology name of the data file
trip_source(str): original source url of citi_bike data
station_info(str): current status station info of the stations
weather_source(str): original source url of weather data
is_temp(bool): (optional) if the data file is temporary
"""
def __init__(self, topology: str, trip_source: str, station_info: str, weather_source: str, is_temp: bool = False):
super().__init__()
self._data_pipeline["trip"] = CitiBikePipeline(topology, trip_source, station_info, is_temp)
self._data_pipeline["weather"] = NOAAWeatherPipeline(topology, weather_source, is_temp)
self._is_temp = is_temp
def __del__(self):
if self._is_temp:
self.remove()
class CitiBikeToyPipeline(DataPipeline):
_clean_file_name = "trips.csv"
_build_file_name = "trips.bin"
_station_meta_file_name = "station_meta.csv"
_distance_file_name = "distance_adj.csv"
_meta_file_name = "trips.yml"
def __init__(
self, start_time: str, end_time: str, stations: list, trips: list, topology: str, is_temp: bool = False
):
"""
Generate synthetic business events and station initialization distribution for Citi Bike scenario,
from the predefined toy topologies.
Folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file and other necessary files
Args:
start_time(str): start time of the toy data
end_time(str): end time of the toy data
stations(list): list of stations info
trips(list): list of trips probability
topology(str): topology name of the data files
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__("citi_bike", topology, "", is_temp)
self._start_time = start_time
self._end_time = end_time
self._stations = stations
self._trips = trips
self._distance_file = os.path.join(self._build_folder, self._distance_file_name)
self._station_meta_file = os.path.join(self._build_folder, self._station_meta_file_name)
def download(self, is_force: bool):
pass
def _station_dict_to_pd(self, station_dict):
"""convert dictionary of station information to pd series"""
return pd.Series(
[
station_dict["id"],
station_dict["capacity"],
station_dict["init"],
station_dict["lat"],
station_dict["lon"],
],
index=["station_index", "capacity", "init", "latitude", "longitude"])
def _gen_stations(self):
"""generate station meta csv"""
self._new_file_list.append(self._station_meta_file)
stations = pd.Series(self._stations).apply(self._station_dict_to_pd)
stations["station_index"] = pd.to_numeric(stations["station_index"], errors="coerce", downcast="integer")
stations["station_id"] = pd.to_numeric(stations["station_index"], errors="coerce", downcast="integer")
stations["capacity"] = pd.to_numeric(stations["capacity"], errors="coerce", downcast="integer")
stations["init"] = pd.to_numeric(stations["init"], errors="coerce", downcast="integer")
with open(self._station_meta_file, "w", encoding="utf-8", newline="") as f:
stations.to_csv(f, index=False, header=True)
return stations
def _gen_trip(self, tick):
"""generate trip record"""
ret_list = []
cur_probability = random.uniform(0, 1)
for trip in self._trips:
if trip["probability"] >= cur_probability:
ret = {}
ret["start_time"] = tick
ret["start_station_id"] = trip["start_id"]
ret["end_station_id"] = trip["end_id"]
ret["start_station_index"] = trip["start_id"]
ret["end_station_index"] = trip["end_id"]
ret["duration"] = random.uniform(0, 120)
ret_list.append(ret)
return ret_list
def _gen_trips(self):
"""generate trip records csv files"""
cur_tick = pd.to_datetime(self._start_time)
end_tick = pd.to_datetime(self._end_time)
trips = []
while cur_tick < end_tick:
new_trips = self._gen_trip(cur_tick)
trips.extend(new_trips)
cur_tick += pd.Timedelta(120, unit="second")
trips_df = pd.DataFrame.from_dict(trips)
trips_df["start_station_index"] = pd.to_numeric(trips_df["start_station_index"],
errors="coerce", downcast="integer")
trips_df["end_station_index"] = pd.to_numeric(trips_df["end_station_index"],
errors="coerce", downcast="integer")
self._new_file_list.append(self._clean_file)
with open(self._clean_file, "w", encoding="utf-8", newline="") as f:
trips_df.to_csv(f, index=False, header=True)
return trips_df
def _gen_distance(self, station_init: pd.DataFrame):
"""generate distance metrix csv file"""
distance_adj = pd.DataFrame(
0,
index=station_init["station_index"],
columns=station_init["station_index"],
            dtype=float  # np.float was removed in recent NumPy; the builtin float is equivalent here
)
look_up_df = station_init[["latitude", "longitude"]]
distance_df = distance_adj.apply(lambda x: pd.DataFrame(x).apply(lambda y: geopy.distance.distance(
(look_up_df.at[x.name, "latitude"], look_up_df.at[x.name, "longitude"]),
(look_up_df.at[y.name, "latitude"], look_up_df.at[y.name, "longitude"])
).km, axis=1), axis=1)
self._new_file_list.append(self._distance_file)
with open(self._distance_file, "w", encoding="utf-8", newline="") as f:
distance_df.to_csv(f, index=False, header=True)
return distance_df
def clean(self):
logger.info_green(f"Generating trip data for topology {self._topology} .")
super().clean()
stations = self._gen_stations()
self._gen_trips()
self._gen_distance(stations)
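# Example usage (illustrative only; the station and trip dicts below are made-up values,
# but their keys match what _station_dict_to_pd and _gen_trip expect):
#   toy = CitiBikeToyPipeline(
#       start_time="2019-01-01 00:00:00",
#       end_time="2019-01-02 00:00:00",
#       stations=[
#           {"id": 0, "capacity": 30, "init": 10, "lat": 40.75, "lon": -73.99},
#           {"id": 1, "capacity": 20, "init": 5, "lat": 40.76, "lon": -73.98},
#       ],
#       trips=[{"probability": 0.05, "start_id": 0, "end_id": 1}],
#       topology="toy.2s_1t",
#       is_temp=True,
#   )
#   toy.clean()  # generates the station meta, synthetic trips and distance matrix files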
class WeatherToyPipeline(WeatherPipeline):
def __init__(self, topology: str, start_time: str, end_time: str, is_temp: bool = False):
"""
Generate weather data bin for the specified topology from frontierweather.com.
It will be generated in ~/.maro/data/citi_bike/[topology]/_build.
folder structure:
~/.maro
/data/citi_bike/[topology]
/_build bin data file
/source
/_download original data file
/_clean cleaned data file
/temp download temp file
Args:
topology(str): topology name of the data file
start_time(str): start time of the toy data
end_time(str): end time of the toy data
is_temp(bool): (optional) if the data file is temporary
"""
super().__init__(topology, "", is_temp)
self._start_time = start_time
self._end_time = end_time
def download(self, is_force: bool):
pass
def clean(self):
logger.info_green("Cleaning weather data")
DataPipeline.clean(self)
self._new_file_list.append(self._clean_file)
self._preprocess(output_file=self._clean_file)
def _weather(self):
water = round(float(random.uniform(-1, 1)), 2)
snow = round(float(random.uniform(-1, 1)), 2)
if snow > 0.0 and water > 0:
return WeatherPipeline.WeatherEnum.SLEET.value
elif water > 0.0:
return WeatherPipeline.WeatherEnum.RAINY.value
elif snow > 0.0:
return WeatherPipeline.WeatherEnum.SNOWY.value
else:
return WeatherPipeline.WeatherEnum.SUNNY.value
def _gen_weather(self, tick):
date = tick.strftime("%m/%d/%Y %H:%M:%S")
wh = self._weather()
temp = round(float(random.uniform(-1, 1) * 40), 2)
return {"date": date, "weather": wh, "temp": temp}
def _preprocess(self, output_file: str):
data: list = []
cur_tick = pd.to_datetime(self._start_time)
        end_tick = pd.to_datetime(self._end_time)
# https://www.kaggle.com/devanshbesain/exploration-and-analysis-auto-mpg
#https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_regression.ipynb
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import warnings
warnings.filterwarnings('ignore')
pd.set_option('precision', 2) # 2 decimal places
pd.set_option('display.max_rows', 20)
pd.set_option('display.max_columns', 30)
pd.set_option('display.width', 150) # wide windows
import os
figdir = os.path.join(os.environ["PYPROBML"], "figures")
#from sklearn.datasets import fetch_openml
#auto = fetch_openml('autoMpg', cache=True)
# The OpenML version converts the original categorical data
# to integers starting at 0.
# We want the 'raw' data.
#url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
# We made a cached copy since UCI repository is often down
url = 'https://raw.githubusercontent.com/probml/pyprobml/master/data/mpg.csv'
#column_names = ['mpg','cylinders','displacement','horsepower','weight',
# 'acceleration', 'model_year', 'origin', 'name']
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Year', 'Origin', 'Name']
df = pd.read_csv(url, names=column_names, sep='\s+', na_values="?")
# The last column (name) is a unique id for the car, so we drop it
df = df.drop(columns=['Name'])
df.info()
"""
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 398 entries, 0 to 397
Data columns (total 8 columns):
MPG 398 non-null float64
Cylinders 398 non-null int64
Displacement 398 non-null float64
Horsepower 392 non-null float64
Weight 398 non-null float64
Acceleration 398 non-null float64
Year 398 non-null int64
Origin 398 non-null int64
dtypes: float64(5), int64(3)
memory usage: 25.0 KB
"""
# We notice that there are only 392 horsepower rows, but 398 of the others.
# This is because the HP column has 6 missing values (also called NA, or
# not available).
# There are 3 main ways to deal with this:
# Drop the rows with any missing values using dropna()
# Drop any columns with any missing values using drop()
# Replace the missing values with some other value (e.g. the median) using fillna().
# (The latter approach is called missing value imputation.)
df = df.dropna()
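# For completeness, option 3 above (imputation instead of dropping rows) would look like
# this (not used here; shown only to make the comment concrete):
#   df['Horsepower'] = df['Horsepower'].fillna(df['Horsepower'].median())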
# Origin is categorical (1=USA, 2=Europe, 3=Japan)
df['Origin'] = df.Origin.replace([1,2,3],['USA','Europe','Japan'])
df['Origin'] = df['Origin'].astype('category')
# Cylinders is an integer in [3,4,5,6,8]
#df['Cylinders'] = df['Cylinders'].astype('category')
# Year is an integer year (between 70 and 82)
#df['Year'] = df['Year'].astype('category')
df0 = df.copy()
# Let us check the datatypes
print(df.dtypes)
"""
MPG float64
Cylinders int64
Displacement float64
Horsepower float64
Weight float64
Acceleration float64
Year int64
Origin category
"""
# Let us check the categories
df['Origin'].cat.categories #Index(['Europe', 'Japan', 'USA'], dtype='object')
# Let us inspect the data
df.tail()
"""
MPG Cylinders Displacement Horsepower Weight Acceleration Year Origin
393 27.0 4 140.0 86.0 2790.0 15.6 82 USA
394 44.0 4 97.0 52.0 2130.0 24.6 82 Europe
395 32.0 4 135.0 84.0 2295.0 11.6 82 USA
396 28.0 4 120.0 79.0 2625.0 18.6 82 USA
397 31.0 4 119.0 82.0 2720.0 19.4 82 USA
"""
#https://www.kaggle.com/devanshbesain/exploration-and-analysis-auto-mpg
# Plot mpg distribution for cars from different countries of origin
data = pd.concat( [df['MPG'], df['Origin']], axis=1)
fig, ax = plt.subplots()
ax = sns.boxplot(x='Origin', y='MPG', data=data)
ax.axhline(data.MPG.mean(), color='r', linestyle='dashed', linewidth=2)
plt.savefig(os.path.join(figdir, 'auto-mpg-origin-boxplot.pdf'))
plt.show()
# Plot mpg distribution for cars from different years
data = pd.concat( [df['MPG'], df['Year']], axis=1)
fig, ax = plt.subplots()
ax = sns.boxplot(x='Year', y='MPG', data=data)
ax.axhline(data.MPG.mean(), color='r', linestyle='dashed', linewidth=2)
plt.savefig(os.path.join(figdir, 'auto-mpg-year-boxplot.pdf'))
plt.show()
# Convert origin string (factor) to integer
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
origin = df['Origin']
origin = encoder.fit_transform(origin)
# Check the data
print(np.unique(origin)) # [0 1 2] # Not the same values as the original [1,2,3]
# Check the encoding - happens to be the same as original ordering
print(encoder.classes_) # ['Europe' 'Japan' 'USA']
# Convert back (for printing purposes)
origin_names = encoder.inverse_transform(origin)
assert np.array_equal(origin_names, df['Origin'])
# Convert integer encoding to one-hot vectors
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder()
origin = origin.reshape(-1, 1) # Convert (N) to (N,1)
origin_onehot = encoder.fit_transform(origin) # Sparse array
# Convert to dense array for printing purposes
print(origin_onehot[-5:,:].toarray())
"""
[[0. 0. 1.]
[1. 0. 0.]
[0. 0. 1.]
[0. 0. 1.]
[0. 0. 1.]]
"""
"""
# We should be able to combine LabelEncoder and OneHotEncoder
# using a Pipeline. However this fails due to known bug: https://github.com/scikit-learn/scikit-learn/issues/3956
# TypeError: fit_transform() takes 2 positional arguments but 3 were given
from sklearn.pipeline import Pipeline
pipeline = Pipeline([
('str2int', LabelEncoder()),
('int2onehot', OneHotEncoder())
])
origin_onehot2 = pipeline.fit_transform(df['Origin'])
"""
# Convert origin string to one-hot encoding
# New feature in scikit-learn v0.20
# https://jorisvandenbossche.github.io/blog/2017/11/20/categorical-encoder/
# https://medium.com/bigdatarepublic/integrating-pandas-and-scikit-learn-with-pipelines-f70eb6183696
from sklearn.preprocessing import OneHotEncoder
def one_hot_encode_dataframe_col(df, colname):
encoder = OneHotEncoder(sparse=False)
data = df[[colname]] # Extract column as (N,1) matrix
data_onehot = encoder.fit_transform(data)
df = df.drop(columns=[colname])
ncats = np.size(encoder.categories_)
for c in range(ncats):
colname_c = '{}:{}'.format(colname, c)
df[colname_c] = data_onehot[:,c]
return df, encoder
df_onehot, encoder_origin = one_hot_encode_dataframe_col(df, 'Origin')
df_onehot.tail()
"""
Cylinders Displacement Horsepower Weight Acceleration Year Origin:0 Origin:1 Origin:2
393 4 140.0 86.0 2790.0 15.6 82 0.0 0.0 1.0
394 4 97.0 52.0 2130.0 24.6 82 1.0 0.0 0.0
395 4 135.0 84.0 2295.0 11.6 82 0.0 0.0 1.0
396 4 120.0 79.0 2625.0 18.6 82 0.0 0.0 1.0
397 4 119.0 82.0 2720.0 19.4 82 0.0 0.0 1.0
"""
# See also sklearn-pandas library
#https://github.com/scikit-learn-contrib/sklearn-pandas#transformation-mapping
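# A lighter-weight alternative for quick experiments is pandas' built-in one-hot encoding,
# which yields the same dummy columns without an explicit encoder object:
#   df_onehot_pd = pd.get_dummies(df0, columns=['Origin'], prefix='Origin')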
# Replace year with decade (70s and 80s)
year = df.pop('Year')
decade = [ 70 if (y>=70 and y<=79) else 80 for y in year ]
df['Decade'] = pd.Series(decade, dtype='category')
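# The new categorical column can be explored the same way as Origin and Year above,
# e.g. (illustrative):
#   data = pd.concat([df['MPG'], df['Decade']], axis=1)
#   sns.boxplot(x='Decade', y='MPG', data=data)
#   plt.show()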
'''
Code author: @wallissoncarvalho
Github repository: github.com/wallissoncarvalho/HidroData
Under BSD 3-Clause "New" or "Revised" License
'''
import xml.etree.ElementTree as ET
import requests
import pandas as pd
import calendar
import geopandas as gpd
from tqdm import tqdm
def inventario(params={'codEstDE': '', 'codEstATE': '', 'tpEst': '1', 'nmEst': '', 'nmRio': '', 'codSubBacia': '', 'codBacia': '',
'nmMunicipio': '', 'nmEstado': '', 'sgResp': '', 'sgOper': '', 'telemetrica': ''}):
'''
    This function looks up the stations registered in the Hidroweb inventory.
    By default the code searches for fluviometric (streamflow) stations. You can search for pluviometric (rainfall) stations by setting the tpEst argument to '2'.
    If you want to pass a specific selection, create a dictionary with the following parameters:
    codEstDE: 8-digit station code - START (e.g. 00047000)
    codEstATE: 8-digit station code - END (e.g. 90300000)
    tpEst: Station type (1-Fluviometric or 2-Pluviometric)
    nmEst: Station name (e.g. Barra Mansa)
    nmRio: River name (e.g. Rio Javari)
    codSubBacia: Hydrographic sub-basin code (e.g. 10)
    codBacia: Hydrographic basin code (e.g. 1)
    nmMunicipio: Municipality (e.g. Itaperuna)
    nmEstado: State (e.g. Rio de Janeiro)
    sgResp: Acronym of the agency responsible for the station (e.g. ANA)
    sgOper: Acronym of the agency operating the station (e.g. CPRM)
    telemetrica: telemetry flag (e.g. 1-YES or 0-NO)
    Dictionary with the required fields; fill in only the ones you need:
    params = {'codEstDE': '', 'codEstATE': '', 'tpEst': '', 'nmEst': '', 'nmRio': '', 'codSubBacia': '', 'codBacia': '',
              'nmMunicipio': '', 'nmEstado': '', 'sgResp': '', 'sgOper': '', 'telemetrica': ''}
'''
check_params = ['codEstDE', 'codEstATE', 'tpEst', 'nmEst', 'nmRio', 'codSubBacia',
'codBacia', 'nmMunicipio', 'nmEstado', 'sgResp', 'sgOper', 'telemetrica']
if list(params.keys())!=check_params:
print('O argumento params deve estar vazio ou conter os campos obrigatórios, use help(inventario) para mais informações.')
return
response = requests.get('http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroInventario', params)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
if params['tpEst'] == '1':
index=1
stations = pd.DataFrame(columns=['Nome','Codigo', 'Tipo','AreaDrenagem','SubBacia', 'Municipio','Estado',
'Responsavel', 'Latitude', 'Longitude'])
for station in tqdm(root.iter('Table')):
stations.at[index, 'Nome'] = station.find('Nome').text
stations.at[index, 'Codigo'] = station.find('Codigo').text
stations.at[index, 'Tipo'] = station.find('TipoEstacao').text
stations.at[index, 'AreaDrenagem'] = station.find('AreaDrenagem').text
stations.at[index, 'SubBacia'] = station.find('SubBaciaCodigo').text
stations.at[index, 'Municipio'] = station.find('nmMunicipio').text
stations.at[index, 'Estado'] = station.find('nmEstado').text
stations.at[index, 'Responsavel'] = station.find('ResponsavelSigla').text
stations.at[index, 'Latitude'] = float(station.find('Latitude').text)
stations.at[index, 'Longitude'] = float(station.find('Longitude').text)
index+=1
elif params['tpEst'] == '2':
index=1
stations = pd.DataFrame(columns=['Nome','Codigo', 'Tipo','SubBacia', 'Municipio','Estado',
'Responsavel', 'Latitude', 'Longitude'])
for station in tqdm(root.iter('Table')):
stations.at[index, 'Nome'] = station.find('Nome').text
stations.at[index, 'Codigo'] = station.find('Codigo').text
stations.at[index, 'Tipo'] = station.find('TipoEstacao').text
stations.at[index, 'SubBacia'] = station.find('SubBaciaCodigo').text
stations.at[index, 'Municipio'] = station.find('nmMunicipio').text
stations.at[index, 'Estado'] = station.find('nmEstado').text
stations.at[index, 'Responsavel'] = station.find('ResponsavelSigla').text
stations.at[index, 'Latitude'] = float(station.find('Latitude').text)
stations.at[index, 'Longitude'] = float(station.find('Longitude').text)
index+=1
else:
print('Por favor selecione um tipo de estação, use help(inventario) para mais informações.')
return
stations = gpd.GeoDataFrame(stations, geometry = gpd.points_from_xy(stations.Longitude, stations.Latitude))
return stations
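# Example (illustrative): fetch all fluviometric stations (tpEst='1') in the state of
# Rio de Janeiro. Every key must be present, even if left empty.
#   params = {'codEstDE': '', 'codEstATE': '', 'tpEst': '1', 'nmEst': '', 'nmRio': '',
#             'codSubBacia': '', 'codBacia': '', 'nmMunicipio': '', 'nmEstado': 'Rio de Janeiro',
#             'sgResp': '', 'sgOper': '', 'telemetrica': ''}
#   stations_gdf = inventario(params)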
def stations(list_stations, tipoDados):
'''
    Given a list of station codes, this function returns the daily data series in a dataframe.
    tipoDados must be a string (e.g. '2')
'''
params = {'codEstacao': '', 'dataInicio': '', 'dataFim': '', 'tipoDados': '', 'nivelConsistencia': ''}
typesData = {'3': ['Vazao{:02}'], '2': ['Chuva{:02}'], '1': ['Cota{:02}']}
params['tipoDados'] = tipoDados
data_stations = []
nodata_stations = []
not_try = []
for station in tqdm(list_stations):
params['codEstacao'] = str(station)
try:
response = requests.get('http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroSerieHistorica', params, timeout=50.0)
except (requests.ConnectTimeout, requests.HTTPError, requests.ReadTimeout, requests.Timeout, requests.ConnectionError):
not_try.append(station)
continue
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
df=[]
for month in root.iter('SerieHistorica'):
codigo = month.find('EstacaoCodigo').text
consist = int(month.find('NivelConsistencia').text)
date = pd.to_datetime(month.find('DataHora').text,dayfirst=True)
date = pd.Timestamp(date.year, date.month, 1, 0)
last_day=calendar.monthrange(date.year,date.month)[1]
month_dates = pd.date_range(date,periods=last_day, freq='D')
data = []
list_consist = []
for i in range(last_day):
value = typesData[params['tipoDados']][0].format(i+1)
try:
data.append(float(month.find(value).text))
list_consist.append(consist)
except TypeError:
data.append(month.find(value).text)
list_consist.append(consist)
except AttributeError:
data.append(None)
list_consist.append(consist)
index_multi = list(zip(month_dates,list_consist))
index_multi = pd.MultiIndex.from_tuples(index_multi,names=["Date","Consistence"])
df.append(pd.DataFrame({f'{int(codigo):08}': data}, index=index_multi))
if (len(df))>0:
df = pd.concat(df)
df = df.sort_index()
drop_index = df.reset_index(level=1,drop=True).index.duplicated(keep='last')
df = df[~drop_index]
df = df.reset_index(level=1, drop=True)
series = df[f'{int(codigo):08}']
            date_index = pd.date_range(series.index[0], series.index[-1], freq='D')
import pandas as pd
import numpy as np
import os
import seaborn as sns
import pydeck as pdk
import sklearn.neighbors
from util import config
from util import mapping
def set_presets():
presets_descriptions = [
'Chilling out in the saddle', 'Pretty relaxed, with some climbing',
'Half-day of touring', 'Training for VO2-max', 'Training for strength',
'Training for a century']
presets = pd.DataFrame({
'dist': [10., 15., 45., 20., 10., 85.],
'avg_slope_climbing': [3., 6., 5., 5., 8., 4.],
'avg_slope_descending': [-3., -6., -5., -5., -8., -4.],
'max_slope': [6., 10., 10., 6., 15., 10.],
'dist_climbing': [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
'dist_downhill': [0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
'dist_6percent': [0.05, 0.05, 0.05, 0.05, 0.05, 0.05],
'dist_9percent': [0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
'dist_12percent': [0.005, 0.005, 0.005, 0.005, 0.005, 0.005],
'detour_score': [10, 10, 10, 10, 10, 10],
'popularity': [10, 10, 10, 10, 10, 10],
})
return presets, presets_descriptions
def engineer_features(df):
df_eng = df.copy()
df_eng['dist'] = np.log(df.dist +1e-2)
df_eng['dist_6percent'] = np.log(df.dist_6percent + 1e-2)
df_eng['dist_9percent'] = np.log(df.dist_9percent + 1e-2)
df_eng['dist_12percent'] = np.log(df.dist_12percent + 1e-2)
return df_eng
def reverse_engineer_features(df):
df_reverse = df.copy()
df_reverse['dist'] = np.exp(df.dist) - 1e-2
df_reverse['dist_6percent'] = np.exp(df.dist_6percent) - 1e-2
df_reverse['dist_9percent'] = np.exp(df.dist_9percent) - 1e-2
df_reverse['dist_12percent'] = np.exp(df.dist_12percent) - 1e-2
return df_reverse
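# Round-trip check (illustrative): reverse_engineer_features should invert
# engineer_features on the four log-transformed columns, up to floating-point error.
#   presets, _ = set_presets()
#   roundtrip = reverse_engineer_features(engineer_features(presets))
#   assert np.allclose(roundtrip['dist'], presets['dist'])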
def remove_scaling(df):
# Scaling is ((X - mean) / std ) * column_importance
    scaler = pd.read_feather(config.MODEL_PATH + 'feature_scaling.feather')
import os
import json
import pathlib
import multiprocessing as mp
from glob import glob
import pandas as pd
import toml
import ipywidgets as widgets
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import seaborn as sns
import numpy as np
import zipfile
def mkdirp(dirpath):
pathlib.Path(dirpath).mkdir(parents=True, exist_ok=True)
# sugar around recursive glob search
def find_files(dirname, filename_glob):
path = '{}/**/{}'.format(dirname, filename_glob)
return glob(path, recursive=True)
def empty_scores_dataframe():
return pd.DataFrame([], columns=['observer', 'peer', 'timestamp', 'score']).astype(
{'score': 'float64', 'observer': 'int64', 'peer': 'int64', 'timestamp': 'datetime64[ns]'})
def aggregate_peer_scores_single(scores_filepath, peers_table):
df = empty_scores_dataframe()
# select the cols from peers table we want to join on
p = peers_table[['peer_id', 'seq', 'honest']]
with open(scores_filepath, 'rt') as f:
for line in iter(f.readline, ''):
try:
data = json.loads(line)
except BaseException as err:
print('error parsing score json: ', err)
continue
scores = pd.json_normalize(data['Scores'])
scores = scores.T \
.rename(columns={0: 'score'}) \
.reset_index() \
.rename(columns={'index': 'peer_id'})
scores['timestamp'] = pd.to_datetime(data['Timestamp'])
scores['observer_id'] = data['PeerID']
# join with peers table to convert peer ids to seq numbers
s = scores.merge(p, on='peer_id').drop(columns=['peer_id'])
s = s.merge(p.drop(columns=['honest']), left_on='observer_id', right_on='peer_id', suffixes=['_peer', '_observer'])
s = s.drop(columns=['peer_id', 'observer_id'])
s = s.rename(columns={'seq_peer': 'peer', 'seq_observer': 'observer'})
df = df.append(s, ignore_index=True)
df.set_index('timestamp', inplace=True)
return df
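# For reference, each line of a peer-scores file is expected to be a JSON object of the
# shape consumed above (peer IDs and values here are illustrative):
#   {"PeerID": "QmObserver...", "Timestamp": "2020-01-01T00:00:00Z",
#    "Scores": {"QmPeerA...": 3.4, "QmPeerB...": -0.7}}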
def aggregate_peer_scores(score_filepaths, peers_table):
if len(score_filepaths) == 0:
return empty_scores_dataframe()
pool = mp.Pool(mp.cpu_count())
args = [(f, peers_table) for f in score_filepaths]
results = pool.starmap(aggregate_peer_scores_single, args)
# concat all data frames into one
return pd.concat(results)
def empty_metrics_dataframe():
return pd.DataFrame([], columns=['published', 'rejected', 'delivered', 'duplicates', 'droppedrpc',
'peersadded', 'peersremoved', 'topicsjoined', 'topicsleft', 'peer',
'sent_rpcs', 'sent_messages', 'sent_grafts', 'sent_prunes',
'sent_iwants', 'sent_ihaves', 'recv_rpcs', 'recv_messages',
'recv_grafts', 'recv_prunes', 'recv_iwants', 'recv_ihaves'])
def aggregate_metrics_to_pandas_single(metrics_filepath, peers_table):
def munge_keys(d, prefix=''):
out = dict()
for k, v in d.items():
outkey = prefix + k.lower()
out[outkey] = v
return out
rows = list()
with open(metrics_filepath, 'rb') as f:
try:
e = json.load(f)
except BaseException as err:
print('error loading metrics entry: ', err)
else:
pid = e['LocalPeer']
sent = munge_keys(e['SentRPC'], 'sent_')
recv = munge_keys(e['ReceivedRPC'], 'recv_')
del(e['LocalPeer'], e['SentRPC'], e['ReceivedRPC'])
row = munge_keys(e)
row.update(sent)
row.update(recv)
rows.append(row)
row['peer_id'] = pid
df = pd.DataFrame(rows)
p = peers_table[['peer_id', 'seq']]
df = df.merge(p, on='peer_id').drop(columns=['peer_id']).rename(columns={'seq': 'peer'})
return df.astype('int64')
def aggregate_metrics_to_pandas(metrics_filepaths, peers_table):
if len(metrics_filepaths) == 0:
return empty_metrics_dataframe()
pool = mp.Pool(mp.cpu_count())
args = [(f, peers_table) for f in metrics_filepaths]
results = pool.starmap(aggregate_metrics_to_pandas_single, args)
# concat all data frames into one
return pd.concat(results)
def cdf_to_pandas(cdf_filepath):
if os.path.exists(cdf_filepath):
return pd.read_csv(cdf_filepath, delim_whitespace=True, names=['delay_ms', 'count'], dtype='int64')
else:
return pd.DataFrame([], columns=['delay_ms', 'count'], dtype='int64')
def peer_info_to_pandas(peer_info_filename):
with open(peer_info_filename, 'rt') as f:
data = json.load(f)
peers = pd.json_normalize(data)
peers['honest'] = peers['type'] == 'honest'
return peers.astype({'type': 'category',
't_warm': 'datetime64[ns]',
't_connect': 'datetime64[ns]',
't_run': 'datetime64[ns]',
't_cool': 'datetime64[ns]',
't_complete': 'datetime64[ns]'})
def to_pandas(aggregate_output_dir, pandas_output_dir):
mkdirp(pandas_output_dir)
print('converting peer ids and info to pandas...')
peer_info_filename = os.path.join(aggregate_output_dir, 'peer-info.json')
peers = peer_info_to_pandas(peer_info_filename)
outfile = os.path.join(pandas_output_dir, 'peers.gz')
peers.to_pickle(outfile)
print('converting peer scores to pandas...')
scores_files = find_files(aggregate_output_dir, 'peer-scores*')
df = aggregate_peer_scores(scores_files, peers)
outfile = os.path.join(pandas_output_dir, 'scores.gz')
print('writing pandas peer scores to {}'.format(outfile))
df.to_pickle(outfile)
print('converting aggregate metrics to pandas...')
outfile = os.path.join(pandas_output_dir, 'metrics.gz')
metrics_files = find_files(aggregate_output_dir, '*aggregate.json')
df = aggregate_metrics_to_pandas(metrics_files, peers)
print('writing aggregate metrics pandas data to {}'.format(outfile))
df.to_pickle(outfile)
print('converting latency cdf to pandas...')
outfile = os.path.join(pandas_output_dir, 'cdf.gz')
cdf_file = os.path.join(aggregate_output_dir, 'tracestat-cdf.txt')
df = cdf_to_pandas(cdf_file)
print('writing cdf pandas data to {}'.format(outfile))
df.to_pickle(outfile)
def write_pandas(tables, output_dir):
pandas_dir = os.path.join(output_dir, 'pandas')
mkdirp(pandas_dir)
for name, df in tables.items():
fname = os.path.join(pandas_dir, '{}.gz'.format(name))
df.to_pickle(fname)
def load_pandas(analysis_dir):
analysis_dir = os.path.abspath(analysis_dir)
pandas_dir = os.path.join(analysis_dir, 'pandas')
if not os.path.exists(pandas_dir):
print('Cached pandas data not found. Converting analysis data from {} to pandas'.format(analysis_dir))
to_pandas(analysis_dir, pandas_dir)
tables = {}
for f in os.listdir(pandas_dir):
if not f.endswith('.gz'):
continue
name = os.path.splitext(f)[0]
tables[name] = pd.read_pickle(os.path.join(pandas_dir, f))
if 'cdf' in tables:
tables['pdf'] = cdf_to_pdf(tables['cdf'])
return tables
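# Typical notebook usage (illustrative): the returned dict is keyed by the pickle file
# names written in to_pandas(), plus the derived 'pdf' table.
#   tables = load_pandas('./analysis')
#   scores = tables['scores']    # per-observer peer score time series
#   peers = tables['peers']      # per-peer metadata, including the honest/attacker flag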
def test_params_panel(analysis_dir):
param_filename = os.path.join(analysis_dir, '..', 'template-params.toml')
with open(param_filename, 'rt') as f:
contents = f.read()
test_params = toml.loads(contents)
params_out = widgets.Output()
with params_out:
print(contents)
params_panel = widgets.Accordion([params_out])
params_panel.set_title(0, 'Test Parameters')
params_panel.selected_index = None
return (params_panel, test_params)
def save_fig_fn(dest, formats=['png', 'pdf']):
mkdirp(dest)
def save_fig(fig, filename, **kwargs):
try:
for fmt in formats:
base = os.path.splitext(filename)[0]
name = os.path.join(dest, '{}.{}'.format(base, fmt))
fig.savefig(name, format=fmt, **kwargs)
except BaseException as err:
print('Error saving figure to {}: {}'.format(filename, err))
return save_fig
def zipdir(path, ziph, extensions=['.png', '.pdf', '.eps', '.svg']):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
strs = os.path.splitext(file)
if len(strs) < 2:
continue
ext = strs[1]
if ext not in extensions:
continue
ziph.write(os.path.join(root, file))
def archive_figures(figure_dir, out_filename):
zipf = zipfile.ZipFile(out_filename, 'w', zipfile.ZIP_DEFLATED)
zipdir(figure_dir, zipf)
zipf.close()
def no_scores_message():
from IPython.display import display, Markdown
display(Markdown("""##### No peer score data, chart omitted"""))
def tracestat_summary(analysis_dir):
summary_file = os.path.join(analysis_dir, 'tracestat-summary.txt')
if os.path.exists(summary_file):
with open(summary_file, 'rt') as f:
return f.read()
else:
return('no tracestat summary file found')
def make_line(label, ax, x, color, alpha=0.5, linestyle='dashed'):
ax.axvline(x=x, linestyle=linestyle, color=color, alpha=alpha)
return mlines.Line2D([], [], color=color, linestyle=linestyle, label=label, alpha=alpha)
def make_span(label, ax, start, end, color, alpha=0.3):
ax.axvspan(start, end, facecolor=color, alpha=alpha)
return mpatches.Patch(color=color, alpha=alpha, label=label)
def annotate_times(ax, time_annotations, legend_anchor=None):
colors = sns.color_palette('Set2')
def next_color():
c = colors.pop(0)
colors.append(c)
return c
legends = []
for a in time_annotations:
t1 = a['time']
if pd.isnull(t1):
continue
label = a['label']
if 'end_time' in a:
# if we have an end_time, draw a span between start and end
t2 = a['end_time']
            if pd.isnull(t2):
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
from nltk.stem.porter import PorterStemmer
from collections import Counter
import string
import pandas as pd
import numpy as np
import joblib  # sklearn.externals.joblib was removed in recent scikit-learn
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn import svm
from sklearn import model_selection as grid_search  # sklearn.grid_search was removed; GridSearchCV now lives in model_selection
import matplotlib.pyplot as plt
from math import sqrt
import seaborn as sns
# Set style for seaborn
sns.set_style('whitegrid')
# TODO: Look into adding some form of sentiment analysis
class Analyzer:
"""
Class for carrying out the analysis and model creation/loading
"""
def __init__(self, data, labels=None, max_words=150, load_pca=False, load_svm=False, more_stop_words=[''],
use_sentiment=True):
self.data = data # Data matrix
self.labels = labels # Label array
# Text Mining
self.max_words = max_words
self.dtm = []
self.top_words = dict()
self.words = Counter()
self.more_stop_words = more_stop_words
# Principal Component Analysis
self.load_pca = load_pca # Load or compute the PCA?
self.pca = None
self.pcscores = None
self.loadings = None
self.load_squared = None
# Sentiment analysis
self.sentiment = None
self.use_sentiment = use_sentiment
# Support Vector Machine Classifier
self.load_svm = load_svm
self.svc = None
# Use stemming
self.porter = PorterStemmer()
# Set stop words
self.stop_words = set(stopwords.words('english'))
self.stop_words.update([s for s in string.punctuation] +
[u'\u2014', u'\u2019', u'\u201c', u'\xf3', u'\u201d', u'\u2014@', u'://', u'!"', u'"@',
u'."', u'.@', u'co', u'\u2026', u'&', u'&', u'amp', u'...', u'.\u201d', u'000',
u'\xed'])
# Political terms and Twitter handles to remove
self.stop_words.update(['hillary', 'clinton', 'donald', 'trump', 'clinton2016',
'trump2016', 'hillary2016', 'makeamericagreatagain'])
self.stop_words.update(['realdonaldtrump', 'hillaryclinton', 'berniesanders'])
self.stop_words.update(self.more_stop_words)
def create_full_model(self):
print('Getting top {} words...'.format(self.max_words))
self.get_words()
print('Creating document term matrix...')
self.create_dtm()
print('Running Principal Component Analysis...')
self.run_pca()
if self.use_sentiment:
print('Running Sentiment Analysis...')
self.get_sentiment()
print('Running Support Vector Machine Classifier...')
return self.run_svm()
def load_full_model(self):
self.load_words()
self.create_dtm()
self.run_pca()
if self.use_sentiment: self.get_sentiment()
return self.run_svm()
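    # Example end-to-end call (illustrative; `tweets` and `labels` are assumed to be
    # parallel sequences of raw tweet text and class labels):
    #   analyzer = Analyzer(tweets, labels, max_words=150, use_sentiment=True)
    #   result = analyzer.create_full_model()   # words -> DTM -> PCA -> sentiment -> SVM
    #   analyzer.save_words(); analyzer.save_pca()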
def get_words(self):
str_list = ' '.join([tweet for tweet in self.data])
self.words = Counter([self.porter.stem(i.lower()) for i in wordpunct_tokenize(str_list)
if i.lower() not in self.stop_words and not i.lower().startswith('http')])
self.top_words = dict(self.words.most_common(self.max_words))
def save_words(self, filename='words.pkl'):
joblib.dump(self.top_words, 'model/'+filename)
def load_words(self, filename='words.pkl'):
print('Loading model/{}'.format(filename))
self.top_words = joblib.load('model/'+filename)
def create_dtm(self):
dtm = []
for tweet in self.data:
# Make empty row
newrow = dict()
for term in self.top_words.keys():
newrow[term] = 0
tweetwords = [self.porter.stem(i.lower()) for i in wordpunct_tokenize(tweet)
if i.lower() not in self.stop_words and not i.lower().startswith('http')]
for word in tweetwords:
if word in self.top_words.keys():
newrow[word] += 1
dtm.append(newrow)
self.dtm = dtm
def get_sentiment(self):
# Load up the NRC emotion lexicon
filename = 'data/NRC-emotion-lexicon-wordlevel-alphabetized-v0.92.txt'
data = pd.read_csv(filename, delim_whitespace=True, skiprows=45, header=None, names=['word', 'affect', 'flag'])
emotion_words = dict()
emotion_map = dict()
affects = ['positive', 'negative', 'anger', 'anticipation', 'disgust',
'fear', 'joy', 'sadness', 'surprise', 'trust']
for key in affects:
emotion_words[key] = data[(data['affect'] == key) & (data['flag'] == 1)]['word'].tolist()
emotion_map[key] = list()
for text in self.data: # Note no stemming or it may fail to match words
words = Counter([i.lower() for i in wordpunct_tokenize(text)
if i.lower() not in self.stop_words and not i.lower().startswith('http')])
for key in emotion_words.keys():
x = set(emotion_words[key]).intersection(words.keys())
emotion_map[key].append(len(x))
self.sentiment = pd.DataFrame(emotion_map)
def run_pca(self, filename='pca.pkl'):
df_dtm = pd.DataFrame(self.dtm, columns=self.top_words.keys())
# Load or run the PCA
if self.load_pca:
print('Loading model/{}'.format(filename))
pca = joblib.load('model/'+filename)
else:
pca = PCA(n_components=0.8)
pca.fit(df_dtm)
pcscores = pd.DataFrame(pca.transform(df_dtm))
pcscores.columns = ['PC' + str(i + 1) for i in range(pcscores.shape[1])]
loadings = pd.DataFrame(pca.components_, columns=self.top_words.keys())
load_squared = loadings.transpose() ** 2
load_squared.columns = ['PC' + str(i + 1) for i in range(pcscores.shape[1])]
self.pcscores = pcscores
self.loadings = loadings
self.load_squared = load_squared
# Prep for save, just in case
self.pca = pca
def save_pca(self, filename='pca.pkl'):
joblib.dump(self.pca, 'model/' + filename)
def run_svm(self, filename='svm.pkl'):
if not self.load_svm:
if self.use_sentiment:
self.pcscores.index = range(len(self.pcscores))
                data = pd.concat([self.pcscores, self.sentiment], axis=1)
"""Test the surface_io module."""
from collections import OrderedDict
import pandas as pd
import pytest
import xtgeo
import fmu.dataio._utils as _utils
CFG = OrderedDict()
CFG["model"] = {"name": "Test", "revision": "21.0.0"}
CFG["masterdata"] = {
"smda": {
"country": [
{"identifier": "Norway", "uuid": "ad214d85-8a1d-19da-e053-c918a4889309"}
],
"discovery": [{"short_identifier": "abdcef", "uuid": "ghijk"}],
}
}
CFG["access"] = {
"asset": "Drogon",
"ssdl": {"access_level": "internal", "some_access_tag": True},
}
CFG["model"] = {"revision": "0.99.0"}
def test_uuid_from_string():
"""Testing that uuid from string is repeatable"""
string1 = "string1"
string2 = "string2"
uuid1 = _utils.uuid_from_string(string1)
uuid2 = _utils.uuid_from_string(string2)
uuidx = _utils.uuid_from_string(string1)
assert uuid1 != uuid2
assert uuidx == uuid1
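# For reference, a deterministic string -> UUID mapping like the one exercised above can
# be built with the standard library, e.g. uuid.uuid5 over a fixed namespace. This is only
# a sketch of the idea, not necessarily how fmu.dataio._utils implements it:
#   import uuid
#   def uuid_from_string(string):
#       return str(uuid.uuid5(uuid.NAMESPACE_URL, string))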
def test_parse_parameters_txt():
"""Testing parsing of parameters.txt into a flat dictionary"""
ptext = "tests/data/drogon/ertrun1/realization-1/iter-0/parameters.txt"
res = _utils.read_parameters_txt(ptext)
assert res["SENSNAME"] == "rms_seed"
# Numbers in strings should be parsed as numbers:
assert res["GLOBVAR:VOLON_PERMH_CHANNEL"] == 1100
@pytest.mark.parametrize(
"flat_dict, nested_dict",
[
({}, {}),
({"foo": "bar"}, {"foo": "bar"}),
({"foo:bar": "com"}, {"foo": {"bar": "com"}}),
({"foo:bar:com": "hoi"}, {"foo": {"bar:com": "hoi"}}),
(
{"fo": "ba", "foo:bar": "com", "fooo:barr:comm": "hoi"},
{"fo": "ba", "foo": {"bar": "com"}, "fooo": {"barr:comm": "hoi"}},
),
(
{"foo:bar": "com", "foo:barr": "comm"},
{"foo": {"bar": "com", "barr": "comm"}},
),
# GEN_KW lookalike
(
{"foo:bar": "com1", "hoi:bar": "com2"},
{"foo": {"bar": "com1"}, "hoi": {"bar": "com2"}},
),
pytest.param({"foo:": "com"}, None, marks=pytest.mark.xfail(raises=ValueError)),
],
)
def test_nested_parameters(flat_dict, nested_dict):
assert _utils.nested_parameters_dict(flat_dict) == nested_dict
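# The parametrized cases above fully pin down the expected behaviour; a minimal reference
# implementation consistent with them could look like this (sketch only, not the code
# under test):
def _reference_nested_parameters_dict(flat):
    """Split keys on the first ':' only, merge sub-keys per outer key, and reject an
    empty part on either side of the ':'."""
    nested = {}
    for key, value in flat.items():
        if ":" not in key:
            nested[key] = value
            continue
        outer, inner = key.split(":", 1)
        if not outer or not inner:
            raise ValueError(f"Badly formatted parameter name: {key}")
        nested.setdefault(outer, {})[inner] = value
    return nested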
def test_parse_parameters_txt_justified():
"""Testing parsing of justified parameters.txt into nested dictionary"""
ptext = "tests/data/drogon/ertrun1/realization-0/iter-0/parameters_justified.txt"
res = _utils.nested_parameters_dict(_utils.read_parameters_txt(ptext))
assert res["SENSNAME"] == "rms_seed"
assert res["GLOBVAR"]["VOLON_PERMH_CHANNEL"] == 1100
assert res["LOG10_MULTREGT"]["MULT_VALYSAR_THERYS"] == -3.2582
def test_parse_parameters_txt_genkw():
"""Testing parsing of parameters.txt from GEN_KW"""
ptext = "tests/data/drogon/ertrun1/realization-0/iter-0/parameters_genkw.txt"
res = _utils.nested_parameters_dict(_utils.read_parameters_txt(ptext))
assert res["CATEGORY1"]["SOMENAME"] == -0.01
def test_get_object_name():
"""Test the method for getting name from a data object"""
# surface with no name, shall return None
surface = xtgeo.RegularSurface(ncol=3, nrow=4, xinc=22, yinc=22, values=0)
assert _utils.get_object_name(surface) is None
# surface with name, shall return the name
surface.name = "MySurfaceName"
assert _utils.get_object_name(surface) == "MySurfaceName"
# dataframe: shall return None
    table = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
# Code to analyse the model.
import re
import os
from astropy import units as u
from tardis import constants
import numpy as np
import pandas as pd
class LastLineInteraction(object):
@classmethod
def from_model(cls, model):
return cls(model.runner.last_line_interaction_in_id,
model.runner.last_line_interaction_out_id,
model.runner.last_line_interaction_shell_id,
model.runner.output_nu, model.plasma.atomic_data.lines)
def __init__(self, last_line_interaction_in_id,
last_line_interaction_out_id, last_line_interaction_shell_id,
output_nu, lines, packet_filter_mode='packet_nu'):
# mask out packets which did not perform a line interaction
# TODO mask out packets which do not escape to observer?
mask = last_line_interaction_out_id != -1
self.last_line_interaction_in_id = last_line_interaction_in_id[mask]
self.last_line_interaction_out_id = last_line_interaction_out_id[mask]
self.last_line_interaction_shell_id = last_line_interaction_shell_id[mask]
self.last_line_interaction_angstrom = output_nu.to(
u.Angstrom, equivalencies=u.spectral())[mask]
self.lines = lines
self._wavelength_start = 0 * u.angstrom
self._wavelength_end = np.inf * u.angstrom
self._atomic_number = None
self._ion_number = None
self.packet_filter_mode = packet_filter_mode
self.update_last_interaction_filter()
@property
def wavelength_start(self):
return self._wavelength_start.to('angstrom')
@wavelength_start.setter
def wavelength_start(self, value):
if not isinstance(value, u.Quantity):
raise ValueError('needs to be a Quantity')
self._wavelength_start = value
self.update_last_interaction_filter()
@property
def wavelength_end(self):
return self._wavelength_end.to('angstrom')
@wavelength_end.setter
def wavelength_end(self, value):
if not isinstance(value, u.Quantity):
raise ValueError('needs to be a Quantity')
self._wavelength_end = value
self.update_last_interaction_filter()
@property
def atomic_number(self):
return self._atomic_number
@atomic_number.setter
def atomic_number(self, value):
self._atomic_number = value
self.update_last_interaction_filter()
@property
def ion_number(self):
return self._ion_number
@ion_number.setter
def ion_number(self, value):
self._ion_number = value
self.update_last_interaction_filter()
def update_last_interaction_filter(self):
if self.packet_filter_mode == 'packet_nu':
packet_filter = (
(self.last_line_interaction_angstrom >
self.wavelength_start) &
(self.last_line_interaction_angstrom <
self.wavelength_end))
elif self.packet_filter_mode == 'line_in_nu':
line_in_nu = (
self.lines.wavelength.iloc[
self.last_line_interaction_in_id].values)
packet_filter = (
(line_in_nu > self.wavelength_start.to(u.angstrom).value) &
(line_in_nu < self.wavelength_end.to(u.angstrom).value))
self.last_line_in = self.lines.iloc[
self.last_line_interaction_in_id[packet_filter]]
self.last_line_out = self.lines.iloc[
self.last_line_interaction_out_id[packet_filter]]
if self.atomic_number is not None:
self.last_line_in = self.last_line_in.xs(
self.atomic_number, level='atomic_number', drop_level=False)
self.last_line_out = self.last_line_out.xs(
self.atomic_number, level='atomic_number', drop_level=False)
if self.ion_number is not None:
self.last_line_in = self.last_line_in.xs(
self.ion_number, level='ion_number', drop_level=False)
self.last_line_out = self.last_line_out.xs(
self.ion_number, level='ion_number', drop_level=False)
last_line_in_count = self.last_line_in.line_id.value_counts()
last_line_out_count = self.last_line_out.line_id.value_counts()
self.last_line_in_table = self.last_line_in.reset_index()[
[
'wavelength', 'atomic_number', 'ion_number',
'level_number_lower', 'level_number_upper']]
self.last_line_in_table['count'] = last_line_in_count
self.last_line_in_table.sort_values(by='count', ascending=False,
inplace=True)
self.last_line_out_table = self.last_line_out.reset_index()[
[
'wavelength', 'atomic_number', 'ion_number',
'level_number_lower', 'level_number_upper']]
self.last_line_out_table['count'] = last_line_out_count
self.last_line_out_table.sort_values(by='count', ascending=False,
inplace=True)
def plot_wave_in_out(self, fig, do_clf=True, plot_resonance=True):
if do_clf:
fig.clf()
ax = fig.add_subplot(111)
wave_in = self.last_line_list_in['wavelength']
wave_out = self.last_line_list_out['wavelength']
if plot_resonance:
min_wave = np.min([wave_in.min(), wave_out.min()])
max_wave = np.max([wave_in.max(), wave_out.max()])
ax.plot([min_wave, max_wave], [min_wave, max_wave], 'b-')
ax.plot(wave_in, wave_out, 'b.', picker=True)
ax.set_xlabel('Last interaction Wave in')
ax.set_ylabel('Last interaction Wave out')
def onpick(event):
print("-" * 80)
print("Line_in (%d/%d):\n%s" % (
len(event.ind), self.current_no_packets,
self.last_line_list_in.ix[event.ind]))
print("\n\n")
print("Line_out (%d/%d):\n%s" % (
len(event.ind), self.current_no_packets,
self.last_line_list_in.ix[event.ind]))
print("^" * 80)
def onpress(event):
pass
fig.canvas.mpl_connect('pick_event', onpick)
fig.canvas.mpl_connect('on_press', onpress)
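# Example usage (illustrative; `sim` stands for a TARDIS simulation/model object exposing
# the runner and plasma attributes used in from_model above):
#   lli = LastLineInteraction.from_model(sim)
#   lli.wavelength_start = 3000 * u.angstrom   # the property setters re-run the filter
#   lli.wavelength_end = 4000 * u.angstrom
#   lli.atomic_number = 14                     # Si
#   lli.ion_number = 1                         # Si II
#   print(lli.last_line_out_table.head())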
class TARDISHistory(object):
"""
Records the history of the model
"""
def __init__(self, hdf5_fname, iterations=None):
self.hdf5_fname = hdf5_fname
if iterations is None:
iterations = []
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
for key in hdf_store.keys():
if key.split('/')[1] == 'atom_data':
continue
iterations.append(
int(re.match(r'model(\d+)', key.split('/')[1]).groups()[0]))
self.iterations = np.sort(np.unique(iterations))
hdf_store.close()
else:
self.iterations=iterations
self.levels = None
self.lines = None
def load_atom_data(self):
if self.levels is None or self.lines is None:
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
self.levels = hdf_store['atom_data/levels']
self.lines = hdf_store['atom_data/lines']
hdf_store.close()
def load_t_inner(self, iterations=None):
t_inners = []
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
t_inners.append(hdf_store['model%03d/configuration' %iter].ix['t_inner'])
hdf_store.close()
t_inners = np.array(t_inners)
return t_inners
def load_t_rads(self, iterations=None):
t_rads_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter%03d' % iter
t_rads_dict[current_iter] = hdf_store['model%03d/t_rads' % iter]
t_rads = pd.DataFrame(t_rads_dict)
hdf_store.close()
return t_rads
def load_ws(self, iterations=None):
ws_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter{:03d}'.format(iter)
ws_dict[current_iter] = hdf_store['model{:03d}/ws'.format(iter)]
hdf_store.close()
return pd.DataFrame(ws_dict)
def load_level_populations(self, iterations=None):
level_populations_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter%03d' % iter
level_populations_dict[current_iter] = hdf_store[
'model{:03d}/level_populations'.format(iter)]
hdf_store.close()
if is_scalar:
            return pd.DataFrame(list(level_populations_dict.values())[0])
else:
return pd.Panel(level_populations_dict)
def load_jblues(self, iterations=None):
jblues_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter{:03d}'.format(iter)
jblues_dict[current_iter] = hdf_store[
'model{:03d}/j_blues'.format(iter)]
hdf_store.close()
if is_scalar:
            return pd.DataFrame(list(jblues_dict.values())[0])
else:
return pd.Panel(jblues_dict)
def load_ion_populations(self, iterations=None):
ion_populations_dict = {}
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
is_scalar = False
if iterations is None:
iterations = self.iterations
elif np.isscalar(iterations):
is_scalar = True
iterations = [self.iterations[iterations]]
else:
iterations = self.iterations[iterations]
for iter in iterations:
current_iter = 'iter{:03d}'.format(iter)
ion_populations_dict[current_iter] = hdf_store[
'model{:03d}/ion_populations'.format(iter)]
hdf_store.close()
if is_scalar:
            return pd.DataFrame(list(ion_populations_dict.values())[0])
else:
return pd.Panel(ion_populations_dict)
def load_spectrum(self, iteration, spectrum_keyword='luminosity_density'):
hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
spectrum = hdf_store['model%03d/%s' % (self.iterations[iteration], spectrum_keyword)]
hdf_store.close()
return spectrum
def calculate_relative_lte_level_populations(self, species, iteration=-1):
self.load_atom_data()
t_rads = self.load_t_rads(iteration)
beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:,0])
species_levels = self.levels.ix[species]
relative_lte_level_populations = (
(species_levels.g.values[np.newaxis].T /
float(species_levels.g.loc[0])) *
np.exp(-beta_rads * species_levels.energy.values[np.newaxis].T))
return pd.DataFrame(relative_lte_level_populations, index=species_levels.index)
def calculate_departure_coefficients(self, species, iteration=-1):
self.load_atom_data()
t_rads = self.load_t_rads(iteration)
beta_rads = 1 / (constants.k_B.cgs.value * t_rads.values[:,0])
species_levels = self.levels.ix[species]
species_level_populations = self.load_level_populations(iteration).ix[species]
departure_coefficient = ((species_level_populations.values * species_levels.g.ix[0]) /
(species_level_populations.ix[0].values * species_levels.g.values[np.newaxis].T)) \
* np.exp(beta_rads * species_levels.energy.values[np.newaxis].T)
return pd.DataFrame(departure_coefficient, index=species_levels.index)
def get_last_line_interaction(self, iteration=-1):
iteration = self.iterations[iteration]
self.load_atom_data()
        hdf_store = pd.HDFStore(self.hdf5_fname, 'r')
'''
SYNBIOCHEM (c) University of Manchester. 2018
PathwayGenie is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: neilswainston
'''
# pylint: disable=broad-except
# pylint: disable=invalid-name
# pylint: disable=no-member
import json
import sys
from rdkit import Chem, DataStructs
from rdkit.Chem.Fingerprints import FingerprintMols
import pandas as pd
def analyse(filename_in, filename_out):
'''Analyse similarities.'''
with open(filename_in) as json_data:
data = json.load(json_data)
for entry in data['reaction']:
entry['similarity'] = get_similarity(entry['reactant_smile'],
entry['product_smile'])
# Write updated json:
with open(filename_out + '.json', 'w') as outfile:
json.dump(data, outfile)
# Write Dataframe:
    df = pd.DataFrame(data['reaction'])
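# The fragment above calls get_similarity() but its definition is cut off in this excerpt;
# the sketch below is a hypothetical reconstruction, not the original code.
def get_similarity(reactant_smile, product_smile):
    '''Hypothetical sketch of the similarity helper used in analyse(): a Tanimoto
    similarity between RDKit topological fingerprints, assuming only the imports at the
    top of this file.'''
    reactant = Chem.MolFromSmiles(reactant_smile)
    product = Chem.MolFromSmiles(product_smile)
    if reactant is None or product is None:
        # Unparseable SMILES: no similarity can be computed.
        return None
    reactant_fp = FingerprintMols.FingerprintMol(reactant)
    product_fp = FingerprintMols.FingerprintMol(product)
    return DataStructs.FingerprintSimilarity(reactant_fp, product_fp)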