| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 22 11:05:21 2018
@author: 028375
"""
from __future__ import unicode_literals, division
import pandas as pd
import os.path
import numpy as np
def Check2(lastmonth,thismonth,collateral):
ContractID=(thismonth['ContractID'].append(lastmonth['ContractID'])).append(collateral['ContractID']).drop_duplicates()
Outputs=pd.DataFrame(ContractID).reset_index(drop=True)
cost0=lastmonth[['ContractID','期权标的','标的类型','Upfront结算货币']]
Outputs=pd.merge(Outputs,cost0,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':'期初表Upfront','期权标的':'期初表期权标的','标的类型':'期初表标的类型'})
cost1=thismonth[['ContractID','期权标的','标的类型','Upfront结算货币']]
Outputs=pd.merge(Outputs,cost1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Upfront结算货币':'期末表Upfront','期权标的':'期末表期权标的','标的类型':'期末表标的类型'})
tmp1=collateral.groupby(['ContractID'])[['期权标的','标的类型']].first().reset_index()
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'期权标的':'资金表期权标的','标的类型':'资金表标的类型'})
collateral1=collateral.groupby(['ContractID','现金流类型'])['确认金额(结算货币)'].sum().reset_index()
collateral1=collateral1.rename(columns={'现金流类型':'CashType','确认金额(结算货币)':'Amount'})
tmp1=collateral1[collateral1['CashType']=='前端支付'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'前端支付'})
tmp1=collateral1[collateral1['CashType']=='前端期权费'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'前端期权费'})
tmp1=collateral1[collateral1['CashType']=='展期期权费'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'展期期权费'})
tmp1=collateral1[collateral1['CashType']=='到期结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'到期结算'})
tmp1=collateral1[collateral1['CashType']=='部分赎回'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'部分赎回'})
tmp1=collateral1[collateral1['CashType']=='全部赎回'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'全部赎回'})
tmp1=collateral1[collateral1['CashType']=='期间结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'期间结算'})
tmp1=collateral1[collateral1['CashType']=='红利支付'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'红利支付'})
tmp1=collateral1[collateral1['CashType']=='其他'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'其他'})
tmp1=collateral1[collateral1['CashType']=='定结期间结算'][['ContractID','Amount']]
Outputs=pd.merge(Outputs,tmp1,how='left',on='ContractID')
Outputs=Outputs.rename(columns={'Amount':'定结期间结算'})
Outputs['status1']=''
flag1=np.isnan(Outputs['期初表Upfront'])
flag2=np.isnan(Outputs['期末表Upfront'])
Outputs.loc[flag1&flag2,['status1']]='新起到期'
Outputs.loc[(~flag1)&flag2,['status1']]='存续到期'
Outputs.loc[flag1&(~flag2),['status1']]='新起存续'
Outputs.loc[(~flag1)&(~flag2),['status1']]='两期存续'
Outputs['status2']=''
flag1=(Outputs['status1']=='新起到期')
flag2=(Outputs['status1']=='存续到期')
flag3=(Outputs['status1']=='新起存续')
flag4=(Outputs['status1']=='两期存续')
colflag1=np.isnan(Outputs['前端支付'])
colflag2=np.isnan(Outputs['前端期权费'])
colflag3=np.isnan(Outputs['展期期权费'])
colflag4=np.isnan(Outputs['到期结算'])
colflag5=np.isnan(Outputs['全部赎回'])
colflag6=np.isnan(Outputs['部分赎回'])
colflag7=np.isnan(Outputs['定结期间结算']) #update 0.2.3
tmp1=Outputs[['ContractID','期初表Upfront','期末表Upfront','前端支付','前端期权费','展期期权费','到期结算','部分赎回','全部赎回','定结期间结算']]
tmp1=tmp1.replace(np.nan,0.)
flag5=(tmp1['期末表Upfront']!=0)
flag6=(tmp1['期末表Upfront']-tmp1['期初表Upfront']).round(decimals=4)==0
flag7=(tmp1['期末表Upfront']-tmp1['前端支付']).round(decimals=4)==0
flag8=(tmp1['期末表Upfront']-(tmp1['前端期权费']+tmp1['展期期权费']+tmp1['部分赎回'])).round(decimals=4)==0
#flag9=(tmp1['期末表Upfront']-(tmp1['期初表Upfront']+tmp1['展期期权费']+tmp1['部分赎回'])).round(decimals=4)==0 #update 0.2.3
flag9=(tmp1['期末表Upfront']-(tmp1['期初表Upfront']+tmp1['展期期权费']+tmp1['部分赎回']+tmp1['定结期间结算'])).round(decimals=4)==0 # update 0.2.3: added 定结期间结算 (scheduled interim settlement)
# 新起到期 (opened and matured within the period)
Outputs.loc[flag1,['status2']]='流水异常'
# Outputs.loc[flag1&((~colflag1)|(~colflag2))&((~colflag4)|(~colflag5)),['status2']]='流水正常' #update 0.2.3
Outputs.loc[flag1&((~colflag4)|(~colflag5)),['status2']]='流水正常' #update 0.2.3
# 存续到期 (existing contract that matured this period)
Outputs.loc[flag2,['status2']]='流水异常'
Outputs.loc[flag2&((~colflag4)|(~colflag5)),['status2']]='流水正常'
# 新起存续 (newly opened, still live at period end)
Outputs.loc[flag3,['status2']]='流水异常'
Outputs.loc[flag3&flag5&((~colflag1)|(~colflag2))&colflag4&colflag5,['status2']]='流水正常'
tmp_flag=((~colflag1)&(tmp1['前端支付']!=0))|((~colflag2)&(tmp1['前端期权费']!=0)) # upfront payment / upfront premium exists and is non-zero
Outputs.loc[flag3&(~flag5)&(colflag4&colflag5)&(~tmp_flag),['status2']]='流水正常'
# 两期存续 (live in both periods)
Outputs.loc[flag4,['status2']]='流水异常'
Outputs.loc[flag4&flag6&(colflag3&colflag6&colflag4&colflag5),['status2']]='流水正常'
# Outputs.loc[flag4&(~flag6)&((~colflag3)|(~colflag6)&colflag4&colflag5),['status2']]='流水正常' #update 0.2.3
Outputs.loc[flag4&(~flag6)&((~colflag3)|(~colflag6)|(~colflag7)&colflag4&colflag5),['status2']]='流水正常' # added 定结期间结算 #update 0.2.3
Outputs['status3']=''
flag10=(Outputs['status2']=='流水异常')
Outputs.loc[flag10,['status3']]='流水异常,未验证金额'
Outputs.loc[(~flag10)&flag1,['status3']]='无需验证金额'
Outputs.loc[(~flag10)&flag2,['status3']]='无需验证金额'
Outputs.loc[(~flag10)&flag3,['status3']]='金额异常'
Outputs.loc[(~flag10)&flag3&(flag7|flag8|(~flag5)),['status3']]='金额正常'
Outputs.loc[(~flag10)&flag4,['status3']]='金额异常'
Outputs.loc[(~flag10)&flag4&(flag6|flag9),['status3']]='金额正常'
return Outputs
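# How the status1 buckets above follow from the NaN pattern of the two Upfront columns,
# sketched on a toy frame (column names mirror the real ones; the numbers are invented):
# demo = pd.DataFrame({'期初表Upfront': [np.nan, 100.0, np.nan, 100.0],
#                      '期末表Upfront': [np.nan, np.nan, 80.0, 100.0]})
# f1, f2 = np.isnan(demo['期初表Upfront']), np.isnan(demo['期末表Upfront'])
# demo.loc[f1 & f2, 'status1'] = '新起到期'    # absent from both tables: opened and matured in-period
# demo.loc[~f1 & f2, 'status1'] = '存续到期'   # only in the opening table: matured this period
# demo.loc[f1 & ~f2, 'status1'] = '新起存续'   # only in the closing table: newly opened, still live
# demo.loc[~f1 & ~f2, 'status1'] = '两期存续'  # in both tables: live across both periods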
def Check1(lastmonth,thismonth,collateral):
thismonth['Upfront结算货币']=pd.to_numeric(thismonth['Upfront结算货币'],errors='coerce')
lastmonth['Upfront结算货币']=pd.to_numeric(lastmonth['Upfront结算货币'],errors='coerce')
thismonth['Upfront结算货币']=thismonth['Upfront结算货币'].replace(np.nan,0.)
lastmonth['Upfront结算货币']=lastmonth['Upfront结算货币'].replace(np.nan,0.)
lastmonth['MATURITYDATEREAL']=pd.to_datetime(lastmonth['MATURITYDATEREAL'])
thismonth=thismonth.rename(columns={'起始日':'EffectDate'})
thismonth['EffectDate']=pd.to_datetime(thismonth['EffectDate'])
thismonth=thismonth.rename(columns={'合约编号':'ContractID'})
lastmonth=lastmonth.rename(columns={'合约编号':'ContractID'})
collateral=collateral.rename(columns={'交易编号':'ContractID'})
collateral['现金流产生日期']=pd.to_datetime(collateral['现金流产生日期'])
collateral['确认金额(结算货币)']=pd.to_numeric(collateral['确认金额(结算货币)'],errors='coerce')
collateral['确认金额(结算货币)']=collateral['确认金额(结算货币)'].replace(np.nan,0.)
return lastmonth,thismonth,collateral
def Check0(lastmonth,thismonth,collateral):
lastmonth_dupl=lastmonth[lastmonth.duplicated(subset='合约编号')]
thismonth_dupl=thismonth[thismonth.duplicated(subset='合约编号')]
collateral_dupl=collateral[collateral.duplicated()]
lastmonth=lastmonth.drop_duplicates(subset='合约编号')
thismonth=thismonth.drop_duplicates(subset='合约编号')
collateral=collateral.drop_duplicates(subset=['交易编号','现金流类型','现金流产生日期','确认金额(结算货币)'])
flag1=collateral['现金流类型']!='前端支付'
flag2=collateral['现金流类型']!='前端期权费'
flag3=collateral['现金流类型']!='展期期权费'
flag4=collateral['现金流类型']!='到期结算'
flag5=collateral['现金流类型']!='部分赎回'
flag6=collateral['现金流类型']!='全部赎回'
flag7=collateral['现金流类型']!='期间结算'
flag8=collateral['现金流类型']!='红利支付'
flag9=collateral['现金流类型']!='其他'
flag10=collateral['现金流类型']!='定结期间结算'
collateral_newtype=collateral[flag1&flag2&flag3&flag4&flag5&flag6&flag7&flag8&flag9&flag10]
return lastmonth,thismonth,collateral,lastmonth_dupl,thismonth_dupl,collateral_dupl,collateral_newtype
if __name__=="__main__":
path0=os.path.dirname(os.path.realpath(__file__))+'//'
spotdate=pd.to_datetime('2017-11-30')
lastdate= | pd.to_datetime('2017-12-22') | pandas.to_datetime |
'''
MIT License
Copyright (c) [2018] [<NAME>]
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
'''
import random
import pandas as pd
import numpy as np
def infer_feature_type(feature):
"""Infer data types for the given feature using simple logic.
Possible data types to infer: boolean, category, date, float, integer
Feature that is not either a boolean, a date, a float or an integer,
is classified as an object.
Parameters
----------
feature : array-like
A feature/attribute vector.
Returns
-------
data_type : string
The data type of the given feature/attribute.
"""
types = ["datetime64[ns]", "float64", "int64", "object"]
weights = [0, 0, 0, 0] # Weights corresponding to the data types
feature_len = len(feature)
indices_number = int(0.1 * feature_len) # Number of different values to check in a feature
indices = random.sample(range(0, feature_len), min(indices_number, feature_len)) # Array of random indices
# If the feature only contains two different unique values, then infer it as boolean
if len( | pd.unique(feature) | pandas.unique |
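# The original function is truncated at the dataset-row boundary above. Below is a minimal,
# hypothetical sketch of how such an inference could be completed; the two-unique-values
# boolean rule comes from the docstring, while the to_numeric/to_datetime fallbacks and the
# return labels are assumptions, not the original logic.
def infer_feature_type_sketch(feature):
    feature = pd.Series(feature)
    if len(pd.unique(feature)) == 2:
        return "bool"                                  # two distinct values -> boolean
    as_numeric = pd.to_numeric(feature, errors="coerce")
    if as_numeric.notna().all():
        # integer if every value is whole, otherwise float
        return "int64" if (as_numeric == as_numeric.round()).all() else "float64"
    as_dates = pd.to_datetime(feature, errors="coerce")
    if as_dates.notna().all():
        return "datetime64[ns]"                        # every value parses as a date
    return "object"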
# -*- coding: utf-8 -*-
# Example package with a console entry point
"""Reads and formats data from the SWMM 5 output file."""
from __future__ import absolute_import, print_function
import copy
import datetime
import os
import struct
import sys
import warnings
from builtins import object, range, str, zip
import mando
import numpy as np
import pandas as pd
from mando.rst_text_formatter import RSTHelpFormatter
from tstoolbox import tsutils
PROPCODE = {
0: {1: "Area"},
1: {0: "Type", 2: "Inv_elev", 3: "Max_depth"},
2: {0: "Type", 4: "Inv_offset", 3: "Max_depth", 5: "Length"},
}
# Names for the 'Node type' and 'Link type' codes above
TYPECODE = {
0: {1: "Area"},
1: {0: "Junction", 1: "Outfall", 2: "Storage", 3: "Divider"}, # nodes
2: {0: "Conduit", 1: "Pump", 2: "Orifice", 3: "Weir", 4: "Outlet"}, # links
}
VARCODE = {
0: {
0: "Rainfall",
1: "Snow_depth",
2: "Evaporation_loss",
3: "Infiltration_loss",
4: "Runoff_rate",
5: "Groundwater_outflow",
6: "Groundwater_elevation",
7: "Soil_moisture",
},
1: {
0: "Depth_above_invert",
1: "Hydraulic_head",
2: "Volume_stored_ponded",
3: "Lateral_inflow",
4: "Total_inflow",
5: "Flow_lost_flooding",
},
2: {
0: "Flow_rate",
1: "Flow_depth",
2: "Flow_velocity",
3: "Froude_number",
4: "Capacity",
},
4: {
0: "Air_temperature",
1: "Rainfall",
2: "Snow_depth",
3: "Evaporation_infiltration",
4: "Runoff",
5: "Dry_weather_inflow",
6: "Groundwater_inflow",
7: "RDII_inflow",
8: "User_direct_inflow",
9: "Total_lateral_inflow",
10: "Flow_lost_to_flooding",
11: "Flow_leaving_outfalls",
12: "Volume_stored_water",
13: "Evaporation_rate",
14: "Potential_PET",
},
}
# Prior to 5.10.10
VARCODE_OLD = {
0: {
0: "Rainfall",
1: "Snow_depth",
2: "Evaporation_loss",
3: "Runoff_rate",
4: "Groundwater_outflow",
5: "Groundwater_elevation",
},
1: {
0: "Depth_above_invert",
1: "Hydraulic_head",
2: "Volume_stored_ponded",
3: "Lateral_inflow",
4: "Total_inflow",
5: "Flow_lost_flooding",
},
2: {
0: "Flow_rate",
1: "Flow_depth",
2: "Flow_velocity",
3: "Froude_number",
4: "Capacity",
},
4: {
0: "Air_temperature",
1: "Rainfall",
2: "Snow_depth",
3: "Evaporation_infiltration",
4: "Runoff",
5: "Dry_weather_inflow",
6: "Groundwater_inflow",
7: "RDII_inflow",
8: "User_direct_inflow",
9: "Total_lateral_inflow",
10: "Flow_lost_to_flooding",
11: "Flow_leaving_outfalls",
12: "Volume_stored_water",
13: "Evaporation_rate",
},
}
# swmm_flowunits is here, but currently not used.
_SWMM_FLOWUNITS = {0: "CFS", 1: "GPM", 2: "MGD", 3: "CMS", 4: "LPS", 5: "LPD"}
_LOCAL_DOCSTRINGS = tsutils.docstrings
_LOCAL_DOCSTRINGS[
"filename"
] = """filename : str
Filename of SWMM output file. The SWMM model must complete
successfully for "swmmtoolbox" to correctly read it.
"""
_LOCAL_DOCSTRINGS[
"itemtype"
] = """itemtype : str
One of 'system', 'node', 'link', or 'pollutant' to identify the
type of data you want to extract.
"""
_LOCAL_DOCSTRINGS[
"labels"
] = """labels : str
The remaining arguments uniquely identify a time-series
in the binary file. The format is::
'TYPE,NAME,VAR'
For example: 'link,41a,Flow_rate node,C63,1 ...'
The VAR part of the label can be the name of the variable or the index.
The available variables and their indices can be found using::
'swmmtoolbox listvariables filename.out'
All of the available labels can be listed with::
'swmmtoolbox catalog filename.out'
There is a wild card feature for the labels, where leaving the part out
will return all labels that match all other parts. For example,
+-----------------+-------------------------------------+
| link,b52, | Return all variables for link "b52" |
+-----------------+-------------------------------------+
| link,,Flow_rate | Return "Flow_rate" for all links |
+-----------------+-------------------------------------+
Note that all labels require two commas and no spaces.
"""
def tupleSearch(findme, haystack):
"""Partial search of list of tuples.
The "findme" argument is a tuple and this will find matches in "haystack"
which is a list of tuples of the same size as "findme". An empty string as
an item in "findme" is used as a wildcard for that item when searching
"haystack".
"""
match = []
for words in haystack:
testmatch = []
for i, j in zip(findme, words):
if not i:
testmatch.append(True)
continue
if i == j:
testmatch.append(True)
continue
testmatch.append(False)
if all(testmatch):
match.append(words)
return match
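# Example of the wildcard matching described in the docstring (tuples invented; a real
# haystack comes from catalog(filename)):
# haystack = [('link', '41a', 'Flow_rate'), ('link', 'b52', 'Flow_rate'), ('node', 'C63', 'Depth_above_invert')]
# tupleSearch(('link', '', 'Flow_rate'), haystack)
# # -> [('link', '41a', 'Flow_rate'), ('link', 'b52', 'Flow_rate')]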
class SwmmExtract(object):
"""The class that handles all extraction of data from the out file."""
def __init__(self, filename):
self.RECORDSIZE = 4
self.fp = open(filename, "rb")
self.fp.seek(-6 * self.RECORDSIZE, 2)
(
self.Namesstartpos,
self.offset0,
self.startpos,
self.swmm_nperiods,
errcode,
magic2,
) = struct.unpack("6i", self.fp.read(6 * self.RECORDSIZE))
self.fp.seek(0, 0)
magic1 = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
if magic1 != 516114522:
raise ValueError(
"""
*
* Beginning magic number incorrect.
*
"""
)
if magic2 != 516114522:
raise ValueError(
"""
*
* Ending magic number incorrect.
*
"""
)
if errcode != 0:
raise ValueError(
"""
*
* Error code "{0}" in output file indicates a problem with the run.
*
""".format(
errcode
)
)
if self.swmm_nperiods == 0:
raise ValueError(
"""
*
* There are zero time periods in the output file.
*
"""
)
# --- otherwise read additional parameters from start of file
(
version,
self.swmm_flowunits,
self.swmm_nsubcatch,
self.swmm_nnodes,
self.swmm_nlinks,
self.swmm_npolluts,
) = struct.unpack("6i", self.fp.read(6 * self.RECORDSIZE))
if version < 5100:
varcode = VARCODE_OLD
else:
varcode = VARCODE
self.itemlist = ["subcatchment", "node", "link", "pollutant", "system"]
# Read in the names
self.fp.seek(self.Namesstartpos, 0)
self.names = {0: [], 1: [], 2: [], 3: [], 4: []}
number_list = [
self.swmm_nsubcatch,
self.swmm_nnodes,
self.swmm_nlinks,
self.swmm_npolluts,
]
for i, j in enumerate(number_list):
for _ in range(j):
stringsize = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.names[i].append(
struct.unpack("{0}s".format(stringsize), self.fp.read(stringsize))[
0
]
)
# Stupid Python 3
for key in self.names:
collect_names = []
for name in self.names[key]:
# Why would SWMM allow spaces in names? Anyway...
try:
rname = str(name, "ascii", "replace")
except TypeError:
rname = name.decode("ascii", "replace")
try:
collect_names.append(rname.decode())
except AttributeError:
collect_names.append(rname)
self.names[key] = collect_names
# Update self.varcode to add pollutant names to subcatchment,
# nodes, and links.
self.varcode = copy.deepcopy(varcode)
for itemtype in ["subcatchment", "node", "link"]:
typenumber = self.type_check(itemtype)
start = len(varcode[typenumber])
end = start + len(self.names[3])
nlabels = list(range(start, end))
ndict = dict(list(zip(nlabels, self.names[3])))
self.varcode[typenumber].update(ndict)
# Read pollutant concentration codes
# = Number of pollutants * 4 byte integers
self.pollutant_codes = struct.unpack(
"{0}i".format(self.swmm_npolluts),
self.fp.read(self.swmm_npolluts * self.RECORDSIZE),
)
self.propcode = {}
# self.prop[0] contain property codes and values for
# subcatchments
# self.prop[1] contain property codes and values for nodes
# self.prop[2] contain property codes and values for links
self.prop = {0: [], 1: [], 2: []}
# subcatchments
nsubprop = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.propcode[0] = struct.unpack(
"{0}i".format(nsubprop), self.fp.read(nsubprop * self.RECORDSIZE)
)
for i in range(self.swmm_nsubcatch):
rprops = struct.unpack(
"{0}f".format(nsubprop), self.fp.read(nsubprop * self.RECORDSIZE)
)
self.prop[0].append(list(zip(self.propcode[0], rprops)))
# nodes
nnodeprop = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.propcode[1] = struct.unpack(
"{0}i".format(nnodeprop), self.fp.read(nnodeprop * self.RECORDSIZE)
)
for i in range(self.swmm_nnodes):
rprops = struct.unpack(
"{0}f".format(nnodeprop), self.fp.read(nnodeprop * self.RECORDSIZE)
)
self.prop[1].append(list(zip(self.propcode[1], rprops)))
# links
nlinkprop = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.propcode[2] = struct.unpack(
"{0}i".format(nlinkprop), self.fp.read(nlinkprop * self.RECORDSIZE)
)
for i in range(self.swmm_nlinks):
rprops = struct.unpack(
"{0}f".format(nlinkprop), self.fp.read(nlinkprop * self.RECORDSIZE)
)
self.prop[2].append(list(zip(self.propcode[2], rprops)))
self.vars = {}
self.swmm_nsubcatchvars = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.vars[0] = struct.unpack(
"{0}i".format(self.swmm_nsubcatchvars),
self.fp.read(self.swmm_nsubcatchvars * self.RECORDSIZE),
)
self.nnodevars = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.vars[1] = struct.unpack(
"{0}i".format(self.nnodevars),
self.fp.read(self.nnodevars * self.RECORDSIZE),
)
self.nlinkvars = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.vars[2] = struct.unpack(
"{0}i".format(self.nlinkvars),
self.fp.read(self.nlinkvars * self.RECORDSIZE),
)
self.vars[3] = [0]
self.nsystemvars = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.vars[4] = struct.unpack(
"{0}i".format(self.nsystemvars),
self.fp.read(self.nsystemvars * self.RECORDSIZE),
)
# System vars do not have names per se, but made names = number labels
self.names[4] = [self.varcode[4][i] for i in self.vars[4]]
self.startdate = struct.unpack("d", self.fp.read(2 * self.RECORDSIZE))[0]
days = int(self.startdate)
seconds = (self.startdate - days) * 86400
self.startdate = datetime.datetime(1899, 12, 30) + datetime.timedelta(
days=days, seconds=seconds
)
self.reportinterval = struct.unpack("i", self.fp.read(self.RECORDSIZE))[0]
self.reportinterval = datetime.timedelta(seconds=self.reportinterval)
# Calculate the bytes for each time period when
# reading the computed results
self.bytesperperiod = self.RECORDSIZE * (
2
+ self.swmm_nsubcatch * self.swmm_nsubcatchvars
+ self.swmm_nnodes * self.nnodevars
+ self.swmm_nlinks * self.nlinkvars
+ self.nsystemvars
)
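# For intuition, with made-up counts: 3 subcatchments x 8 variables, 4 nodes x 6, 2 links x 5
# and 15 system variables give RECORDSIZE * (2 + 24 + 24 + 10 + 15) = 4 * 75 = 300 bytes per
# reporting period, where the leading two records hold the period's date as an 8-byte double.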
def type_check(self, itemtype):
if itemtype in [0, 1, 2, 3, 4]:
return itemtype
try:
typenumber = self.itemlist.index(itemtype)
except ValueError:
raise ValueError(
"""
*
* Type argument "{0}" is incorrect.
* Must be in "{1}".
*
""".format(
itemtype, list(range(5)) + self.itemlist
)
)
return typenumber
def name_check(self, itemtype, itemname):
self.itemtype = self.type_check(itemtype)
try:
itemindex = self.names[self.itemtype].index(str(itemname))
except (ValueError, KeyError):
raise ValueError(
"""
*
* {0} was not found in "{1}" list.
*
""".format(
itemname, itemtype
)
)
return (itemname, itemindex)
def get_swmm_results(self, itemtype, name, variableindex, period):
if itemtype not in [0, 1, 2, 4]:
raise ValueError(
"""
*
* Type must be one of subcatchment (0), node (1), link (2), or system (4).
* You gave "{0}".
*
""".format(
itemtype
)
)
_, itemindex = self.name_check(itemtype, name)
date_offset = self.startpos + period * self.bytesperperiod
# Rewind
self.fp.seek(date_offset, 0)
date = struct.unpack("d", self.fp.read(2 * self.RECORDSIZE))[0]
offset = date_offset + 2 * self.RECORDSIZE # skip the date
if itemtype == 0:
offset = offset + self.RECORDSIZE * (itemindex * self.swmm_nsubcatchvars)
elif itemtype == 1:
offset = offset + self.RECORDSIZE * (
self.swmm_nsubcatch * self.swmm_nsubcatchvars
+ itemindex * self.nnodevars
)
elif itemtype == 2:
offset = offset + self.RECORDSIZE * (
self.swmm_nsubcatch * self.swmm_nsubcatchvars
+ self.swmm_nnodes * self.nnodevars
+ itemindex * self.nlinkvars
)
elif itemtype == 4:
offset = offset + self.RECORDSIZE * (
self.swmm_nsubcatch * self.swmm_nsubcatchvars
+ self.swmm_nnodes * self.nnodevars
+ self.swmm_nlinks * self.nlinkvars
)
offset = offset + self.RECORDSIZE * variableindex
self.fp.seek(offset, 0)
value = struct.unpack("f", self.fp.read(self.RECORDSIZE))[0]
return (date, value)
def get_dates(self):
"""Return start and end date tuple."""
begindate = datetime.datetime(1899, 12, 30)
ntimes = list(range(self.swmm_nperiods))
periods = [ntimes[0], ntimes[-1]]
st_end = []
for period in periods:
date_offset = self.startpos + period * self.bytesperperiod
self.fp.seek(date_offset, 0)
day = struct.unpack("d", self.fp.read(2 * self.RECORDSIZE))[0]
st_end.append(begindate + datetime.timedelta(days=int(day)))
return st_end
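# For reference: SWMM stores each timestamp as one 8-byte double holding whole days since
# 1899-12-30 plus a fractional day, which is what the decoding in __init__ and get_dates
# relies on. A standalone sketch of the same conversion (the sample value is invented):
# raw = 36526.25                                   # 36526 days after 1899-12-30 is 2000-01-01
# days = int(raw)
# seconds = (raw - days) * 86400
# decoded = datetime.datetime(1899, 12, 30) + datetime.timedelta(days=days, seconds=seconds)
# # decoded -> datetime.datetime(2000, 1, 1, 6, 0), i.e. a quarter of a day past midnight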
@mando.command()
def about():
"""Display version number and system information."""
tsutils.about(__name__)
@mando.command("catalog", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(_LOCAL_DOCSTRINGS)
def catalog_cli(filename, itemtype="", tablefmt="csv_nos", header="default"):
"""List the catalog of objects in output file.
This catalog list is all of the labels that can be used in the extract
routine.
Parameters
----------
{filename}
{itemtype}
{tablefmt}
{header}
"""
if header == "default":
header = ["TYPE", "NAME", "VARIABLE"]
tsutils._printiso(
catalog(filename, itemtype=itemtype), headers=header, tablefmt=tablefmt
)
def catalog(filename, itemtype=""):
"""List the catalog of objects in output file."""
obj = SwmmExtract(filename)
if itemtype:
typenumber = obj.type_check(itemtype)
plist = [typenumber]
else:
plist = list(range(len(obj.itemlist)))
collect = []
for i in plist:
typenumber = obj.type_check(obj.itemlist[i])
for oname in obj.names[i]:
if obj.itemlist[i] == "pollutant":
continue
if obj.itemlist[i] == "system":
collect.append(["system", oname, oname])
continue
for j in obj.vars[typenumber]:
collect.append([obj.itemlist[i], oname, obj.varcode[typenumber][j]])
return collect
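# Usage sketch (the file name and the resulting labels are purely illustrative):
# for itemtype, name, variable in catalog("run.out", itemtype="link"):
#     print(itemtype, name, variable)   # e.g. "link 41a Flow_rate", "link 41a Flow_depth", ...
# This is the same listing the command line prints via `swmmtoolbox catalog run.out`.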
@mando.command("listdetail", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(_LOCAL_DOCSTRINGS)
def listdetail_cli(filename, itemtype, name="", tablefmt="simple", header="default"):
"""List nodes and metadata in output file.
Parameters
----------
{filename}
{itemtype}
name : str
[optional, default is '']
Specific name to print only that entry. This can be
looked up using 'listvariables'.
{tablefmt}
{header}
"""
tsutils._printiso(
listdetail(filename, itemtype, name=name, header=header), tablefmt=tablefmt
)
def listdetail(filename, itemtype, name="", header="default"):
"""List nodes and metadata in output file."""
obj = SwmmExtract(filename)
typenumber = obj.type_check(itemtype)
if name:
objectlist = [obj.name_check(itemtype, name)[0]]
else:
objectlist = obj.names[typenumber]
propnumbers = obj.propcode[typenumber]
if header == "default":
header = ["#Name"] + [PROPCODE[typenumber][i] for i in propnumbers]
collect = []
for i, oname in enumerate(objectlist):
printvar = [oname]
for j in obj.prop[typenumber][i]:
if j[0] == 0:
try:
printvar.append(TYPECODE[typenumber][j[1]])
except KeyError:
printvar.append(TYPECODE[typenumber][0])
else:
printvar.append(j[1])
collect.append(printvar)
df = pd.DataFrame(collect)
cheader = []
for head in header:
if head not in cheader:
cheader.append(head)
else:
cnt = cheader.count(head)
cheader.append("{0}.{1}".format(head, cnt))
df.columns = cheader
return df
@mando.command("listvariables", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(_LOCAL_DOCSTRINGS)
def listvariables_cli(filename, tablefmt="csv_nos", header="default"):
"""List variables available for each type.
The type are "subcatchment", "node", "link", "pollutant", "system".
Parameters
----------
{filename}
{tablefmt}
{header}
"""
tsutils._printiso(listvariables(filename, header=header), tablefmt=tablefmt)
def listvariables(filename, header="default"):
"""List variables available for each type."""
obj = SwmmExtract(filename)
if header == "default":
header = ["TYPE", "DESCRIPTION", "VARINDEX"]
# 'pollutant' really isn't its own itemtype
# but part of subcatchment, node, and link...
collect = []
for itemtype in ["subcatchment", "node", "link", "system"]:
typenumber = obj.type_check(itemtype)
for i in obj.vars[typenumber]:
try:
collect.append([itemtype, obj.varcode[typenumber][i].decode(), i])
except (TypeError, AttributeError):
collect.append([itemtype, str(obj.varcode[typenumber][i]), str(i)])
return collect
@mando.command("stdtoswmm5", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(_LOCAL_DOCSTRINGS)
def stdtoswmm5_cli(start_date=None, end_date=None, input_ts="-"):
"""Take the toolbox standard format and return SWMM5 format.
Toolbox standard::
Datetime, Column_Name
2000-01-01 00:00:00 , 45.6
2000-01-01 01:00:00 , 45.2
...
SWMM5 format::
; comment line
01/01/2000 00:00, 45.6
01/01/2000 01:00, 45.2
...
Parameters
----------
{input_ts}
{start_date}
{end_date}
"""
tsutils._printiso(
stdtoswmm5(start_date=start_date, end_date=end_date, input_ts=input_ts)
)
def stdtoswmm5(start_date=None, end_date=None, input_ts="-"):
"""Take the toolbox standard format and return SWMM5 format."""
import csv
sys.tracebacklimit = 1000
tsd = tsutils.read_iso_ts(input_ts)[start_date:end_date]
try:
# Header
print(";Datetime,", ", ".join(str(i) for i in tsd.columns))
# Data
cols = tsd.columns.tolist()
tsd["date_tmp_tstoolbox"] = tsd.index.format(
formatter=lambda x: x.strftime("%m/%d/%Y")
)
tsd["time_tmp_tstoolbox"] = tsd.index.format(
formatter=lambda x: x.strftime("%H:%M:%S")
)
tsd.to_csv(
sys.stdout,
float_format="%g",
header=False,
index=False,
cols=["date_tmp_tstoolbox", "time_tmp_tstoolbox"] + cols,
sep=" ",
quoting=csv.QUOTE_NONE,
)
except IOError:
return
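# What the conversion does, sketched on a two-row frame (values invented). Note that the
# code writes space-separated output with seconds, slightly stricter than the
# 'MM/DD/YYYY HH:MM, value' form shown in the CLI docstring:
#   input (toolbox standard)            output (SWMM5 time series)
#   2000-01-01 00:00:00 , 45.6    ->    ;Datetime, flow
#   2000-01-01 01:00:00 , 45.2          01/01/2000 00:00:00 45.6
#                                       01/01/2000 01:00:00 45.2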
@mando.command(formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(_LOCAL_DOCSTRINGS)
def getdata(filename, *labels):
"""DEPRECATED: Use 'extract' instead."""
return extract(filename, *labels)
@mando.command("extract", formatter_class=RSTHelpFormatter, doctype="numpy")
@tsutils.doc(_LOCAL_DOCSTRINGS)
def extract_cli(filename, *labels):
"""Get the time series data for a particular object and variable.
Parameters
----------
{filename}
{labels}
"""
tsutils._printiso(extract(filename, *labels))
def extract(filename, *labels):
"""Get the time series data for a particular object and variable."""
obj = SwmmExtract(filename)
nlabels = []
if isinstance(labels, (list, tuple)) and len(labels) == 1:
labels = labels[0]
for label in labels:
words = tsutils.make_list(label, n=3)
if None not in words:
nlabels.append(words)
continue
try:
words[2] = int(words[2])
typenumber = obj.type_check(words[2])
words[2] = obj.varcode[typenumber][words[2]]
except (ValueError, TypeError):
pass
words = [str(i) if i is not None else None for i in words]
res = tupleSearch(words, catalog(filename))
nlabels = nlabels + res
jtsd = []
for itemtype, name, variablename in nlabels:
typenumber = obj.type_check(itemtype)
name = obj.name_check(itemtype, name)[0]
inv_varcode_map = dict(
zip(obj.varcode[typenumber].values(), obj.varcode[typenumber].keys())
)
try:
variableindex = inv_varcode_map[int(variablename)]
except ValueError:
variableindex = inv_varcode_map[variablename]
begindate = datetime.datetime(1899, 12, 30)
dates = []
values = []
for time in range(obj.swmm_nperiods):
date, value = obj.get_swmm_results(typenumber, name, variableindex, time)
days = int(date)
seconds = int((date - days) * 86400)
extra = seconds % 10
if extra != 0:
if extra == 9:
seconds = seconds + 1
if extra == 1:
seconds = seconds - 1
date = begindate + datetime.timedelta(days=days, seconds=seconds)
dates.append(date)
values.append(value)
if itemtype == "system":
name = ""
jtsd.append(
pd.DataFrame(
| pd.Series(values, index=dates) | pandas.Series |
''' An experiment testing linearly interpolating the predictions of the MIMIC and HIRID models for fine-tuning'''
import argparse
import ipdb
import random
import os
import os.path
import pickle
import csv
import glob
import sys
import matplotlib
matplotlib.use("Agg")
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.pyplot as plt
import seaborn as sns
current_palette = sns.color_palette()
import scipy
import pandas as pd
import numpy as np
import numpy.random as np_rand
import sklearn.metrics as skmetrics
import circews.functions.util.io as mlhc_io
import circews.functions.util.array as mlhc_array
import circews.functions.util.filesystem as mlhc_fs
def score_metrics(labels, scores, correct_factor=None):
taus=[]
tps=[]
fps=[]
nps=[]
for tau in np.arange(0.00,1.01,0.01):
der_labels=(scores>=tau).astype(np.int)
taus.append(tau)
tp=np.sum((labels==1.0) & (der_labels==1.0))
npos=np.sum(labels==1.0)
fp=np.sum((labels==0.0) & (der_labels==1.0))
tps.append(tp)
fps.append(fp)
nps.append(npos)
tps=np.array(tps)
fps=np.array(fps)
taus=np.array(taus)
recalls=tps/nps
precisions=tps/(tps+correct_factor*fps)
precisions[np.isnan(precisions)]=1.0
return (precisions, recalls, taus)
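# Usage sketch (arrays invented). `correct_factor` rescales the false positives in the
# precision, e.g. to undo a subsampling of the negative class before evaluation; with no
# subsampling pass 1.0:
# labels = np.array([1., 0., 1., 0., 0.])
# scores = np.array([0.9, 0.8, 0.4, 0.2, 0.1])
# precisions, recalls, taus = score_metrics(labels, scores, correct_factor=1.0)
# precisions[0], recalls[0]   # tau=0 flags everything: precision = 2/5, recall = 1.0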
def interpolated_mimic_hirid(configs):
static_cols_without_encode=["Age","Height","Emergency"]
static_cols_one_hot_encode=["Surgical","APACHEPatGroup"]
static_cols_one_hot_encode_str=["Sex"]
str_to_int_dict={"M": 0, "F": 1, "U": 2}
random.seed(configs["random_state"])
np_rand.seed(configs["random_state"])
held_out=configs["val_type"]
dim_reduced_str=configs["data_mode"]
task_key=configs["task_key"]
left_hours=configs["lhours"]
right_hours=configs["rhours"]
val_type=configs["val_type"]
assert(dim_reduced_str in ["reduced","non_reduced"])
feat_order=None
if dim_reduced_str=="reduced":
dim_reduced_data=True
else:
dim_reduced_data=False
batch_map=mlhc_io.load_pickle(configs["mimic_pid_map_path"])["pid_to_chunk"]
n_skipped_patients=0
scores_dict={}
labels_dict={}
cal_scores_dict={}
cal_labels_dict={}
hirid_ml_model,hirid_col_desc,hirid_split_key=("lightgbm", "shap_top20_variables_MIMIC","held_out")
hirid_model_dir=os.path.join(configs["predictions_dir"],"reduced",hirid_split_key,"{}_{}_{}_{}_{}".format(task_key, left_hours, right_hours, hirid_col_desc, hirid_ml_model))
hirid_model_dir=hirid_model_dir+"_full"
with open(os.path.join(hirid_model_dir,"best_model.pickle"),'rb') as fp:
hirid_model=pickle.load(fp)
hirid_feat_order=list(hirid_model._Booster.feature_name())
all_labels=[("lightgbm", "shap_top20_variables_MIMIConly_random_0","random_0"),("lightgbm", "shap_top20_variables_MIMIConly_random_1","random_1"),("lightgbm", "shap_top20_variables_MIMIConly_random_2","random_2"),
("lightgbm", "shap_top20_variables_MIMIConly_random_3","random_3"),("lightgbm", "shap_top20_variables_MIMIConly_random_4","random_4")]
for mimic_ml_model, mimic_col_desc,mimic_split_key in all_labels:
configs["split_key"]=mimic_split_key
print("Analyzing model ({},{},{})".format(mimic_ml_model,mimic_col_desc, mimic_split_key),flush=True)
mimic_data_split=mlhc_io.load_pickle(configs["mimic_split_path"])[mimic_split_key]
pred_pids=mimic_data_split[val_type]
print("Number of test PIDs: {}".format(len(pred_pids)),flush=True)
mimic_model_dir=os.path.join(configs["predictions_dir"],"reduced",hirid_split_key,"{}_{}_{}_{}_{}".format(task_key, left_hours, right_hours, mimic_col_desc, mimic_ml_model))
feat_dir=os.path.join(configs["mimic_ml_input_dir"],"reduced",hirid_split_key,"AllLabels_0.0_8.0","X")
labels_dir=os.path.join(configs["mimic_ml_input_dir"],"reduced",hirid_split_key,"AllLabels_0.0_8.0","y")
impute_dir=os.path.join(configs["mimic_imputed_dir"], "reduced",hirid_split_key)
mimic_model_dir=mimic_model_dir+"_full"
with open(os.path.join(mimic_model_dir,"best_model.pickle"),'rb') as fp:
mimic_model=pickle.load(fp)
mimic_feat_order=list(mimic_model._Booster.feature_name())
assert(hirid_feat_order==mimic_feat_order)
cum_pred_scores=[]
cum_labels=[]
cum_pred_scores_valid=[]
cum_labels_valid=[]
cum_pred_scores_retrain=[]
cum_labels_retrain=[]
df_shapelet_path=os.path.join(configs["mimic_shapelets_path"])
n_valid_count=0
skip_reason_key=skip_reason_ns_bef=skip_reason_ns_after=skip_reason_shapelet=0
if configs["val_type"]=="val" or configs["full_explore_mode"]:
ip_coeff=configs["ip_coeff"]
else:
val_results=glob.glob(os.path.join(configs["result_dir"],"result_val_*.tsv"))
val_dict={}
for rpath in sorted(val_results):
ip_coeff_val=float(rpath.split("/")[-1].split("_")[-1][:-4])
with open(rpath,'r') as fp:
csv_fp=csv.reader(fp)
next(csv_fp)
for split,auroc,auprc in csv_fp:
if not split==mimic_split_key:
continue
val_dict[ip_coeff_val]=float(auprc)
ip_coeff=max(val_dict,key=val_dict.get)
print("Best IP coeff on val set: {}".format(ip_coeff),flush=True)
for pidx,pid in enumerate(pred_pids):
if (pidx+1)%100==0 and configs["verbose"]:
print("{}/{}, KEY: {}, NS BEF: {}, NS AFT: {}, SHAPELET: {}".format(pidx+1,len(pred_pids), skip_reason_key, skip_reason_ns_bef,skip_reason_ns_after, skip_reason_shapelet),flush=True)
if pidx>=100 and configs["debug_mode"]:
break
batch_pat=batch_map[pid]
try:
pat_df=pd.read_hdf(os.path.join(feat_dir,"batch_{}.h5".format(batch_pat)), "/{}".format(pid), mode='r')
pat_label_df=pd.read_hdf(os.path.join(labels_dir,"batch_{}.h5".format(batch_pat)), "/{}".format(pid),mode='r')
assert(pat_df.shape[0]==pat_label_df.shape[0])
df_feat_valid=pat_df[pat_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]
df_label_valid=pat_label_df[pat_label_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]
assert(df_feat_valid.shape[0]==df_label_valid.shape[0])
except KeyError:
skip_reason_key+=1
continue
if df_feat_valid.shape[0]==0:
skip_reason_ns_bef+=1
continue
shapelet_df=pd.read_hdf(df_shapelet_path, '/{}'.format(pid), mode='r')
shapelet_df["AbsDatetime"]=pd.to_datetime(shapelet_df["AbsDatetime"])
special_cols=["AbsDatetime","PatientID"]
shapelet_cols=list(filter(lambda col: "_dist-set" in col, sorted(shapelet_df.columns.values.tolist())))
shapelet_df=shapelet_df[special_cols+shapelet_cols]
if shapelet_df.shape[0]==0:
skip_reason_shapelet+=1
continue
df_merged=pd.merge(df_feat_valid,shapelet_df,on=["AbsDatetime","PatientID"])
df_feat_valid=df_merged
pat_label_df_orig_cols=sorted(df_label_valid.columns.values.tolist())
df_label_valid=pd.merge(df_label_valid,shapelet_df,on=["AbsDatetime","PatientID"])
df_label_valid=df_label_valid[pat_label_df_orig_cols]
if df_feat_valid.shape[0]==0:
skip_reason_ns_after+=1
continue
all_feat_cols=sorted(df_feat_valid.columns.values.tolist())
sel_feat_cols=list(filter(lambda col: "Patient" not in col, all_feat_cols))
X_df=df_feat_valid[sel_feat_cols]
true_labels=df_label_valid["Label_WorseStateFromZero0.0To8.0Hours"]
assert(true_labels.shape[0]==X_df.shape[0])
X_feats=X_df[hirid_feat_order]
X_full_collect=[X_feats]
X_full=np.concatenate(X_full_collect,axis=1)
pred_scores_mimic=mimic_model.predict_proba(X_full)[:,1]
pred_scores_hirid=hirid_model.predict_proba(X_full)[:,1]
pred_scores_ip=ip_coeff*pred_scores_hirid+(1-ip_coeff)*pred_scores_mimic
df_out_dict={}
abs_dt=pat_df["AbsDatetime"]
rel_dt=pat_df["RelDatetime"]
pred_ip_vect=mlhc_array.empty_nan(abs_dt.size)
pred_ip_vect[pat_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]=pred_scores_ip
pred_mimic_vect=mlhc_array.empty_nan(abs_dt.size)
pred_mimic_vect[pat_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]=pred_scores_mimic
pred_hirid_vect=mlhc_array.empty_nan(abs_dt.size)
pred_hirid_vect[pat_df["SampleStatus_WorseStateFromZero0.0To8.0Hours"]=="VALID"]=pred_scores_hirid
pid_vect=mlhc_array.value_empty(abs_dt.size,pid)
y_vect=np.array(pat_label_df["Label_WorseStateFromZero0.0To8.0Hours"])
df_out_dict["PatientID"]=pid_vect
df_out_dict["PredScoreInterpolated"]=pred_ip_vect
df_out_dict["PredScoreHiRiD"]=pred_hirid_vect
df_out_dict["PredScoreMIMIC"]=pred_mimic_vect
df_out_dict["TrueLabel"]=y_vect
df_out_dict["AbsDatetime"]=abs_dt
df_out_dict["RelDatetime"]=rel_dt
df_out= | pd.DataFrame(df_out_dict) | pandas.DataFrame |
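# The heart of the experiment above, in brief: for a candidate coefficient ip_coeff in [0, 1]
# the two models' scores are blended as
#     pred_scores_ip = ip_coeff * pred_scores_hirid + (1 - ip_coeff) * pred_scores_mimic
# and, for the test runs, ip_coeff is the value that maximised AUPRC on the validation split
# (that is what the result_val_*.tsv lookup at the top of the loop does).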
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymysql
import pandas as pd
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.colors as colors
import netCDF4 as nc
from netCDF4 import Dataset
#------------------------------------------------------------------------------
# Motivation for code section 1 ------------------------------------------------
"Code for drawing and computing the hourly frequency histograms of rainfall at selected measurement points. The data to plot for each"
"measurement point is read as a pandas DataFrame, and the histograms of the accumulated rainfall and of the hours with accumulation are then computed."
"It was initially created to estimate the distribution of the accumulations at the measurement points of the experimental panels."
"It uses the data from 2018."
Pluvio = 'si' ##--> Must be 'si' so that the rainfall from the two rain gauges is averaged
#-----------------------------------------------------------------------------
# Rutas para las fuentes -----------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################
## ----------------LECTURA DE LOS ARCHIVOS DE ACUMULADOS----------------##
##########################################################################
df_Acum_JV = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH211.csv', sep=',', index_col =0)
df_Acum_CI = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH206.csv', sep=',', index_col =0)
df_Acum_TS = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH201.csv', sep=',', index_col =0)
df_Acum_JV.index = pd.to_datetime(df_Acum_JV.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_Acum_CI.index = pd.to_datetime(df_Acum_CI.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_Acum_TS.index = pd.to_datetime(df_Acum_TS.index, format="%Y-%m-%d %H:%M", errors='coerce')
df_Acum_JV = df_Acum_JV.between_time('06:00', '17:59')
df_Acum_CI = df_Acum_CI.between_time('06:00', '17:59')
df_Acum_TS = df_Acum_TS.between_time('06:00', '17:59')
########################################################################
## ----------------AJUSTE DE LOS DATOS DEL PLUVIÓMETRO----------------##
########################################################################
"Si uno de los archivos leidos tiene infomación de pluviometro, se deben promediar los acumulados horarios de P1 y P2 para tener un solo estimado."
if Pluvio == 'si':
df_Acum_JV['Precip'] = df_Acum_JV[['P1', 'P2']].mean(axis=1)
df_Acum_JV = df_Acum_JV.drop(['P1', 'P2'], axis=1)
########################################################################
## ----------------HISTOGRAMAS DE LA LLUVIA HORARIOS -----------------##
########################################################################
df_Acum_JV_rain = df_Acum_JV[df_Acum_JV['Precip']>0]
df_Acum_CI_rain = df_Acum_CI[df_Acum_CI['Precip']>0]
df_Acum_TS_rain = df_Acum_TS[df_Acum_TS['Precip']>0]
## -------------------------OBTENER LAS HORAS Y FECHAS LLUVIOSAS---------------------------- ##
Hora_JV = df_Acum_JV_rain.index.hour
Fecha_JV = df_Acum_JV_rain.index.date
Hora_CI = df_Acum_CI_rain.index.hour
Fecha_CI = df_Acum_CI_rain.index.date
Hora_TS = df_Acum_TS_rain.index.hour
Fecha_TS = df_Acum_TS_rain.index.date
## -----------------------------DIBUJAR LOS HISTOGRAMAS DE LAS HORAS ------ ----------------------- #
fig = plt.figure(figsize=[10, 6])
plt.rc('axes', edgecolor='gray')
ax1 = fig.add_subplot(1, 3, 1)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.hist(Hora_JV, bins='auto', alpha = 0.5, color = 'orange', label = 'H_Lluvia')
ax1.set_title(u'Distribución de horas lluviosas en JV', fontproperties=prop, fontsize = 8)
ax1.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax1.set_xlabel(u'Horas', fontproperties=prop_1)
ax1.legend()
ax2 = fig.add_subplot(1, 3, 2)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.hist(Hora_CI, bins='auto', alpha = 0.5, color = 'orange', label = 'H_Lluvia')
ax2.set_title(u'Distribución de horas lluviosas en CI', fontproperties=prop, fontsize = 8)
ax2.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax2.set_xlabel(u'Horas', fontproperties=prop_1)
ax2.legend()
ax3 = fig.add_subplot(1, 3, 3)
ax3.spines['top'].set_visible(False)
ax3.spines['right'].set_visible(False)
ax3.hist(Hora_TS, bins='auto', alpha = 0.5, color = 'orange', label = 'H_Lluvia')
ax3.set_title(u'Distribución de horas lluviosas en TS', fontproperties=prop, fontsize = 8)
ax3.set_ylabel(u'Frecuencia', fontproperties=prop_1)
ax3.set_xlabel(u'Horas', fontproperties=prop_1)
ax3.legend()
plt.savefig('/home/nacorreasa/Escritorio/Figuras/HistoHorasLluvia_2018.png')
plt.close('all')
os.system('scp /home/nacorreasa/Escritorio/Figuras/HistoHorasLluvia_2018.png [email protected]:/var/www/nacorreasa/Graficas_Resultados/Estudio')
#------------------------------------------------------------------------------
# Motivation for code section 2 ------------------------------------------------
"This section of the code aims to find the lagged correlation between the hours with accumulated precipitation and the cloudy"
"hours, in order to verify the GOES CH2 threshold information for clouds."
################################################################################################
## -------------------------------LECTURA DE DATOS DE GOES CH02------------------------------ ##
################################################################################################
#ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C2_2019_0320_0822.nc')
ds = Dataset('/home/nacorreasa/Maestria/Datos_Tesis/GOES/GOES_nc_CREADOS/GOES_VA_C22018.nc')
## ---------------------------------AJUSTE DE LOS DATOS DE GOES CH2----------------------------------------- ##
lat = ds.variables['lat'][:, :]
lon = ds.variables['lon'][:, :]
Rad = ds.variables['Radiancias'][:, :, :]
## -- Obtener el tiempo para cada valor
tiempo = ds.variables['time']
fechas_horas = nc.num2date(tiempo[:], units=tiempo.units)
for i in range(len(fechas_horas)):
fechas_horas[i] = pd.to_datetime(fechas_horas[i] , format="%Y-%m-%d %H:%M", errors='coerce')
################################################################################################
##-------------------INCORPORANDO EL ARRAY DEL ZENITH PARA CADA HORA--------------------------##
################################################################################################
def Aclarado_visible(Path_Zenith, Path_Fechas, Rad, fechas_horas):
Z = np.load(Path_Zenith)
Fechas_Z = np.load(Path_Fechas)
daily_hours = np.arange(5, 19, 1)
Zenith = []
Fechas_Zenith = []
for i in range(len(Fechas_Z)):
if Fechas_Z[i].hour in daily_hours:
Zenith.append(Z[i, :, :])
Fechas_Zenith.append(Fechas_Z[i])
elif Fechas_Z[i].hour not in daily_hours:
pass
Zenith = np.array(Zenith)
Rad_clear = []
for i in range(len(Fechas_Zenith)):
for j in range(len(fechas_horas)):
if Fechas_Zenith[i].hour == fechas_horas[j].hour and Fechas_Zenith[i].day == fechas_horas[j].day:
Rad_clear.append(Rad[j, :, :]/np.cos(Zenith[i, :, :]))
else:
pass
Rad_clear = np.array(Rad_clear)
return Rad
Rad_Z = Aclarado_visible('/home/nacorreasa/Maestria/Datos_Tesis/hourlyZenith2018.npy', '/home/nacorreasa/Maestria/Datos_Tesis/DatesZenith.npy', Rad, fechas_horas)
del Rad
Rad = Rad_Z
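# Note on Aclarado_visible: inside its loop it computes Rad_clear = Rad / cos(zenith), a
# simple solar-zenith "clearing" of the visible-channel radiances, but as written it returns
# the original Rad array, so Rad_Z here is the uncorrected data. If the correction were
# intended to take effect, the function would presumably need to return Rad_clear instead.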
## -- Selección del pixel de la TS y creación de DF
lat_index_975 = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index_975 = np.where((lon[0, :] < -75.58) & (lon[0, :] > -75.59))[0][0]
Rad_pixel_975 = Rad[:, lat_index_975, lon_index_975]
Rad_df_975 = pd.DataFrame()
Rad_df_975['Fecha_Hora'] = fechas_horas
Rad_df_975['Radiacias'] = Rad_pixel_975
Rad_df_975['Fecha_Hora'] = pd.to_datetime(Rad_df_975['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_975.index = Rad_df_975['Fecha_Hora']
Rad_df_975 = Rad_df_975.drop(['Fecha_Hora'], axis=1)
## -- Selección del pixel de la CI
lat_index_350 = np.where((lat[:, 0] > 6.16) & (lat[:, 0] < 6.17))[0][0]
lon_index_350 = np.where((lon[0, :] < -75.64) & (lon[0, :] > -75.65))[0][0]
Rad_pixel_350 = Rad[:, lat_index_350, lon_index_350]
Rad_df_350 = pd.DataFrame()
Rad_df_350['Fecha_Hora'] = fechas_horas
Rad_df_350['Radiacias'] = Rad_pixel_350
Rad_df_350['Fecha_Hora'] = pd.to_datetime(Rad_df_350['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_350.index = Rad_df_350['Fecha_Hora']
Rad_df_350 = Rad_df_350.drop(['Fecha_Hora'], axis=1)
## -- Selección del pixel de la JV
lat_index_348 = np.where((lat[:, 0] > 6.25) & (lat[:, 0] < 6.26))[0][0]
lon_index_348 = np.where((lon[0, :] < -75.54) & (lon[0, :] > -75.55))[0][0]
Rad_pixel_348 = Rad[:, lat_index_348, lon_index_348]
Rad_df_348 = pd.DataFrame()
Rad_df_348['Fecha_Hora'] = fechas_horas
Rad_df_348['Radiacias'] = Rad_pixel_348
Rad_df_348['Fecha_Hora'] = pd.to_datetime(Rad_df_348['Fecha_Hora'], format="%Y-%m-%d %H:%M", errors='coerce')
Rad_df_348.index = Rad_df_348['Fecha_Hora']
Rad_df_348 = Rad_df_348.drop(['Fecha_Hora'], axis=1)
'NOTE: FROM HERE ON--------------------------------------------------------------------------------------'
'For the correlation analysis it is indeed convenient to have the information at hourly resolution.'
## ------------------------CAMBIANDO LOS DATOS HORARIOS POR LOS ORIGINALES---------------------- ##
# Rad_df_348_h = Rad_df_348
# Rad_df_350_h = Rad_df_350
# Rad_df_975_h = Rad_df_975
## ------------------------------------DATOS HORARIOS DE REFLECTANCIAS------------------------- ##
Rad_df_348_h = Rad_df_348.groupby(pd.Grouper(freq="H")).mean()
Rad_df_350_h = Rad_df_350.groupby( | pd.Grouper(freq="H") | pandas.Grouper |
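# The same hourly aggregation, shown on a tiny invented series for clarity:
# s = pd.Series([1.0, 3.0, 5.0],
#               index=pd.to_datetime(['2018-01-01 06:10', '2018-01-01 06:50', '2018-01-01 07:20']))
# s.groupby(pd.Grouper(freq="H")).mean()
# # 2018-01-01 06:00:00    2.0
# # 2018-01-01 07:00:00    5.0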
from pickle import loads, dumps
import numpy as np
import pandas as pd
from classicML import _cml_precision
from classicML import CLASSICML_LOGGER
from classicML.api.models import BaseModel
from classicML.backend import get_conditional_probability
from classicML.backend import get_dependent_prior_probability
from classicML.backend import get_probability_density
from classicML.backend import type_of_target
from classicML.backend import io
class OneDependentEstimator(BaseModel):
"""独依赖估计器的基类.
Attributes:
attribute_name: list of name, default=None,
属性的名称.
is_trained: bool, default=False,
模型训练后将被标记为True.
is_loaded: bool, default=False,
如果模型加载了权重将被标记为True.
Raises:
NotImplementedError: compile, fit, predict方法需要用户实现.
"""
def __init__(self, attribute_name=None):
"""初始化独依赖估计器.
Arguments:
attribute_name: list of name, default=None,
属性的名称.
"""
super(OneDependentEstimator, self).__init__()
self.attribute_name = attribute_name
self.is_trained = False
self.is_loaded = False
def compile(self, *args, **kwargs):
"""编译独依赖估计器.
"""
raise NotImplementedError
def fit(self, x, y, **kwargs):
"""训练独依赖估计器.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
y: numpy.ndarray or pandas.DataFrame, array-like, 标签.
"""
raise NotImplementedError
def predict(self, x, **kwargs):
"""使用独依赖估计器进行预测.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
"""
raise NotImplementedError
def load_weights(self, filepath):
"""加载模型参数.
Arguments:
filepath: str, 权重文件加载的路径.
Raises:
KeyError: 模型权重加载失败.
Notes:
模型将不会加载关于优化器的超参数.
"""
raise NotImplementedError
def save_weights(self, filepath):
"""将模型权重保存为一个HDF5文件.
Arguments:
filepath: str, 权重文件保存的路径.
Raises:
TypeError: 模型权重保存失败.
Notes:
模型将不会保存关于优化器的超参数.
"""
raise NotImplementedError
class SuperParentOneDependentEstimator(OneDependentEstimator):
"""超父独依赖估计器.
Attributes:
attribute_name: list of name, default=None,
属性的名称.
super_parent_name: str, default=None,
超父的名称.
super_parent_index: int, default=None,
超父的索引值.
_list_of_p_c: list,
临时保存中间的概率依赖数据.
smoothing: bool, default=None,
是否使用平滑, 这里的实现是拉普拉斯修正.
"""
def __init__(self, attribute_name=None):
"""初始化超父独依赖估计器.
Arguments:
attribute_name: list of name, default=None,
属性的名称.
"""
super(SuperParentOneDependentEstimator, self).__init__(attribute_name=attribute_name)
self.super_parent_name = None
self.super_parent_index = None
self.smoothing = None
self._list_of_p_c = list()
def compile(self, super_parent_name, smoothing=True):
"""编译超父独依赖估计器.
Arguments:
super_parent_name: str, default=None,
超父的名称.
smoothing: bool, default=True,
是否使用平滑, 这里的实现是拉普拉斯修正.
"""
self.super_parent_name = super_parent_name
self.smoothing = smoothing
def fit(self, x, y, **kwargs):
"""训练超父独依赖估计器.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
y: numpy.ndarray or pandas.DataFrame, array-like, 标签.
Returns:
SuperParentOneDependentEstimator实例.
"""
if isinstance(x, np.ndarray) and self.attribute_name is None:
CLASSICML_LOGGER.warn("属性名称缺失, 请使用pandas.DataFrame; 或检查 self.attributes_name")
# 为特征数据添加属性信息.
x = pd.DataFrame(x, columns=self.attribute_name)
x.reset_index(drop=True, inplace=True)
y = pd.Series(y)
y.reset_index(drop=True, inplace=True)
for index, feature_name in enumerate(x.columns):
if self.super_parent_name == feature_name:
self.super_parent_index = index
for category in np.unique(y):
unique_values_xi = x.iloc[:, self.super_parent_index].unique()
for value in unique_values_xi:
# 初始化概率字典.
p_c = dict()
# 获取有依赖的类先验概率P(c, xi).
c_xi = (x.values[:, self.super_parent_index] == value) & (y == category)
c_xi = x.values[c_xi, :]
p_c_xi = get_dependent_prior_probability(len(c_xi),
len(x.values),
len(unique_values_xi),
self.smoothing)
p_c.update({'p_c_xi': _cml_precision.float(p_c_xi)})
# 获取有依赖的类条件概率P(xj|c, xi)或概率密度p(xj|c, xi)所需的信息.
for attribute in range(x.shape[1]):
xj = x.iloc[:, attribute]
continuous = type_of_target(xj.values) == 'continuous'
if continuous:
# 连续值概率密度函数信息.
if len(c_xi) <= 2:
# 样本数量过少的时候, 使用全局的均值和方差.
mean = np.mean(x.values[y == category, attribute])
var = np.var(x.values[y == category, attribute])
else:
mean = np.mean(c_xi[:, attribute])
var = np.var(c_xi[:, attribute])
p_c.update({x.columns[attribute]: {
'continuous': continuous,
'values': [mean, var]}})
else:
# 离散值条件概率信息.
unique_value = xj.unique()
num_of_unique_value = len(unique_value)
value_count = pd.DataFrame(np.zeros((1, num_of_unique_value)), columns=unique_value)
for key in pd.value_counts(c_xi[:, attribute]).keys():
value_count[key] += pd.value_counts(c_xi[:, attribute])[key]
# 统计不同属性值的样本总数.
D_c_xi = dict()
for name in value_count:
D_c_xi.update({name: _cml_precision.float(value_count[name].values)})
p_c.update({x.columns[attribute]: {
'continuous': continuous,
'values': [D_c_xi, c_xi.shape[0], num_of_unique_value],
'smoothing': self.smoothing}})
self._list_of_p_c.append({'category': category, 'attribute': value, 'p_c': p_c})
self.is_trained = True
return self
def predict(self, x, probability=False):
"""使用超父独依赖估计器进行预测.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
probability: bool, default=False,
是否使用归一化的概率形式.
Returns:
SuperParentOneDependentEstimator的预测结果,
不使用概率形式将返回0或1的标签数组, 使用将返回反正例概率的数组.
Raises:
ValueError: 模型没有训练的错误.
"""
if self.is_trained is False and self.is_loaded is False:
CLASSICML_LOGGER.error('模型没有训练')
raise ValueError('你必须先进行训练')
# 为特征数据添加属性信息.
x = pd.DataFrame(x, columns=self.attribute_name)
x.reset_index(drop=True, inplace=True)
y_pred = list()
if len(x.shape) == 1:
p_0, p_1 = self._predict(x)
if probability:
y_pred.append([p_0 / (p_0 + p_1), p_1 / (p_0 + p_1)])
else:
if p_0 > p_1:
y_pred.append(0)
else:
y_pred.append(1)
else:
for i in range(x.shape[0]):
x_test = x.iloc[i, :]
p_0, p_1 = self._predict(x_test)
if probability:
y_pred.append([p_0 / (p_0 + p_1), p_1 / (p_0 + p_1)])
else:
if p_0 > p_1:
y_pred.append(0)
else:
y_pred.append(1)
return y_pred
def load_weights(self, filepath):
"""加载模型参数.
Arguments:
filepath: str, 权重文件加载的路径.
Raises:
KeyError: 模型权重加载失败.
Notes:
模型将不会加载关于优化器的超参数.
"""
# 初始化权重文件.
parameters_gp = io.initialize_weights_file(filepath=filepath,
mode='r',
model_name='SuperParentOneDependentEstimator')
# 加载模型参数.
try:
compile_ds = parameters_gp['compile']
weights_ds = parameters_gp['weights']
self.super_parent_name = compile_ds.attrs['super_parent_name']
self.smoothing = compile_ds.attrs['smoothing']
self._list_of_p_c = loads(weights_ds.attrs['_list_of_p_c'].tobytes())
# 标记加载完成
self.is_loaded = True
except KeyError:
CLASSICML_LOGGER.error('模型权重加载失败, 请检查文件是否损坏')
raise KeyError('模型权重加载失败')
def save_weights(self, filepath):
"""将模型权重保存为一个HDF5文件.
Arguments:
filepath: str, 权重文件保存的路径.
Raises:
TypeError: 模型权重保存失败.
Notes:
模型将不会保存关于优化器的超参数.
"""
# 初始化权重文件.
parameters_gp = io.initialize_weights_file(filepath=filepath,
mode='w',
model_name='SuperParentOneDependentEstimator')
# 保存模型参数.
try:
compile_ds = parameters_gp['compile']
weights_ds = parameters_gp['weights']
compile_ds.attrs['super_parent_name'] = self.super_parent_name
compile_ds.attrs['smoothing'] = self.smoothing
weights_ds.attrs['_list_of_p_c'] = np.void(dumps(self._list_of_p_c))
except TypeError:
CLASSICML_LOGGER.error('模型权重保存失败, 请检查文件是否损坏')
raise TypeError('模型权重保存失败')
def _predict(self, x, attribute_list=None, super_parent_index=None):
"""通过平均独依赖估计器预测单个样本.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like,
特征数据.
attribute_list: list, default=None,
临时保存中间的概率依赖数据(包含所有的属性, 仅使用AODE时有意义).
super_parent_index: int, default=None,
超父的索引值.
Returns:
返回预测的结果.
"""
y_pred = np.zeros([2], dtype=_cml_precision.float)
if attribute_list is None and super_parent_index is None:
for i in self._list_of_p_c:
self._calculate_posterior_probability(x, i, y_pred)
else:
# TODO(<NAME>, tag:performance): 这里是为了满足AODE调用的便利.
for i in attribute_list[super_parent_index]:
if i['attribute'] == x[super_parent_index]:
self._calculate_posterior_probability(x, i, y_pred)
return y_pred
@staticmethod
def _calculate_posterior_probability(x, i, y_pred):
"""计算后验概率.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like,
特征数据.
i: int, 样本的索引.
y_pred: list, 后验概率列表.
"""
_p_c = i['p_c']
if i['category'] == 0:
for index, probability in enumerate(_p_c):
if probability == 'p_c_xi':
# 先添加P(c, xi)
y_pred[0] += np.log(_p_c[probability])
else:
# 添加P(xj|c, xi)
continuous = _p_c[probability]['continuous']
# 分别处理连续值和离散值.
if continuous:
mean, var = _p_c[probability]['values']
probability_density = get_probability_density(x[index - 1], mean, var)
y_pred[0] += np.log(probability_density) # 存放数据中多存放一个p_c_xi导致和x的索引无法对齐.
else:
D_c_xi_xj, D_c_xi, num_of_unique_value = _p_c[probability]['values']
y_pred[0] += np.log(get_conditional_probability(_cml_precision.int(D_c_xi_xj[x[index - 1]]),
D_c_xi,
num_of_unique_value,
_p_c[probability]['smoothing']))
elif i['category'] == 1:
for index, probability in enumerate(_p_c):
if probability == 'p_c_xi':
y_pred[1] += np.log(_p_c[probability])
else:
continuous = _p_c[probability]['continuous']
if continuous:
mean, var = _p_c[probability]['values']
probability_density = get_probability_density(x[index - 1], mean, var)
y_pred[1] += np.log(probability_density)
else:
D_c_xi_xj, D_c_xi, num_of_unique_value = _p_c[probability]['values']
y_pred[1] += np.log(get_conditional_probability(_cml_precision.int(D_c_xi_xj[x[index - 1]]),
D_c_xi,
num_of_unique_value,
_p_c[probability]['smoothing']))
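# Usage sketch (the feature names, data and super-parent choice below are invented; in
# practice they come from the dataset the estimator is fit on):
# spode = SuperParentOneDependentEstimator(attribute_name=['color', 'root', 'density'])
# spode.compile(super_parent_name='color', smoothing=True)
# spode.fit(x_train, y_train)                       # x_train: array-like features, y_train: 0/1 labels
# y_prob = spode.predict(x_test, probability=True)  # [[P(y=0|x), P(y=1|x)], ...] after normalisation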
class AveragedOneDependentEstimator(SuperParentOneDependentEstimator):
"""平均独依赖估计器.
Attributes:
attribute_name: list of name, default=None,
属性的名称.
super_parent_name: str, default=None,
超父的名称.
smoothing: bool, default=None,
是否使用平滑, 这里的实现是拉普拉斯修正.
m: int, default=0,
阈值常数, 样本小于此值的属性将不会被作为超父类.
_attribute_list: list,
临时保存中间的概率依赖数据(包含所有的属性).
"""
def __init__(self, attribute_name=None):
"""初始化平均独依赖估计器.
Arguments:
attribute_name: list of name, default=None,
属性的名称.
"""
super(AveragedOneDependentEstimator, self).__init__(attribute_name=attribute_name)
self.smoothing = None
self.m = 0
self._attribute_list = list()
def compile(self, smoothing=True, m=0, **kwargs):
"""编译平均独依赖估计器.
Arguments:
smoothing: bool, default=True,
是否使用平滑, 这里的实现是拉普拉斯修正.
m: int, default=0,
阈值常数, 样本小于此值的属性将不会被作为超父类.
"""
self.smoothing = smoothing
self.m = m
def fit(self, x, y, **kwargs):
"""训练平均独依赖估计器.
Arguments:
x: numpy.ndarray or pandas.DataFrame, array-like, 特征数据.
y: numpy.ndarray or pandas.DataFrame, array-like, 标签.
Returns:
AverageOneDependentEstimator实例.
"""
if isinstance(x, np.ndarray) and self.attribute_name is None:
CLASSICML_LOGGER.warn("属性名称缺失, 请使用pandas.DataFrame; 或检查 self.attributes_name")
        # TODO(<NAME>, tag:code): no sound theoretical basis has been found yet for resuming training from a checkpoint.
self._attribute_list = list()
        # Attach the attribute names to the feature data.
x = pd.DataFrame(x, columns=self.attribute_name)
x.reset_index(drop=True, inplace=True)
        y = pd.Series(y)
from pytorch_lightning.core.step_result import TrainResult
import pandas as pd
import torch
import math
import numpy as np
from src.utils import simple_accuracy
from copy import deepcopy
from torch.optim.lr_scheduler import LambdaLR
class WeightEMA(object):
def __init__(self, model, ema_model, alpha=0.999):
self.model = model
self.ema_model = ema_model
self.ema_model.eval()
self.alpha = alpha
self.ema_has_module = hasattr(self.ema_model, 'module')
# Fix EMA. https://github.com/valencebond/FixMatch_pytorch thank you!
self.param_keys = [k for k, _ in self.ema_model.named_parameters()]
self.buffer_keys = [k for k, _ in self.ema_model.named_buffers()]
for p in self.ema_model.parameters():
p.requires_grad_(False)
def step(self):
needs_module = hasattr(self.model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = self.model.state_dict()
esd = self.ema_model.state_dict()
for k in self.param_keys:
if needs_module:
j = 'module.' + k
else:
j = k
model_v = msd[j].detach()
ema_v = esd[k]
esd[k].copy_(ema_v * self.alpha + (1. - self.alpha) * model_v)
for k in self.buffer_keys:
if needs_module:
j = 'module.' + k
else:
j = k
esd[k].copy_(msd[j])
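# Illustrative sketch (not part of the original code): how WeightEMA is typically
# wired into a training loop. The model/optimizer/loss names below are assumptions
# made purely for demonstration.
def _weight_ema_usage_example(model, ema_model, optimizer, compute_loss, batches):
    # Build the EMA tracker once, then call .step() after every optimizer update so
    # the shadow weights follow ema = alpha * ema + (1 - alpha) * online.
    ema_updater = WeightEMA(model, ema_model, alpha=0.999)
    for batch in batches:
        loss = compute_loss(model, batch)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema_updater.step()
    return ema_model  # the smoothed copy is what gets evaluated/checkpointed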
class UnlabelledStatisticsLogger:
def __init__(self, level='image', save_frequency=500, artifacts_path=None, name='unlabelled'):
self.level = level
self.batch_dfs = []
self.save_frequency = save_frequency
self.artifacts_path = artifacts_path
        self.logging_df = pd.DataFrame()
import datetime
from pandas.core import series
import pytz
import os
import pathlib
import csv
import math
import urllib.request
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
population_total = 32657400
# Get the current generation time in MYT timezone
timeZ_My = pytz.timezone('Asia/Kuala_Lumpur')
now = datetime.datetime.now(timeZ_My)
current_time = now.strftime("%Y-%m-%d %H:%M:%S")
# Get datapoints from official CITF Malaysia and process as CSV with Pandas
url = "https://raw.githubusercontent.com/CITF-Malaysia/citf-public/main/vaccination/vax_malaysia.csv"
df = pd.read_csv(url)
import os, sys
from numpy.lib.function_base import copy
import cv2
import numpy as np
import pandas as pd
import torch as th
from stable_baselines3.common.utils import get_device
from kairos_minerl.gail_wrapper import (
ActionShaping_FindCave,
ActionShaping_Waterfall,
ActionShaping_Animalpen,
ActionShaping_Villagehouse,
ActionShaping_Navigation,
)
# OPERATION MODE
MODEL_OP_MODE = os.getenv('MODEL_OP_MODE', None)
class KAIROS_GUI():
"""
Displays agent POV and internal states relevant when debugging.
"""
def __init__(self, exp_id, save_video=True):
self.resolution = 512 # pixels
self.resolution_x = 1024 # pixels
self.resolution_y = 512 # pixels
self.font = cv2.FONT_HERSHEY_SIMPLEX
self.font_scale = 0.5
self.text_color = (255, 255, 255)
self.thickness = 1
self.waitkey_delay = 1
self.intervention_mode = False
self.intervention_key = None
self.action_position = (int(0.01*self.resolution), int(0.05*self.resolution))
self.y_vision_feature_offset = int(0.04*self.resolution)
self.state_classifier_position = (int(0.01*self.resolution), int(0.1*self.resolution))
self.subtask_text_position = (int(0.01*self.resolution), int(0.97*self.resolution))
self.save_video = save_video
# setup video
self.out = cv2.VideoWriter(
f'train/videos/kairos_minerl_{exp_id}.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 20, (self.resolution_x, self.resolution_y))
self.out_original = cv2.VideoWriter(
f'train/videos/original_{exp_id}.mp4',cv2.VideoWriter_fourcc(*'DIVX'), 20, (self.resolution, self.resolution))
def display_step(self, obs, state_classifier, action, subtask, odom_frame):
# setup image to display
obs_as_rgb_img = cv2.resize(obs, dsize=[self.resolution,self.resolution])
# reverse blue and red channels
red = obs_as_rgb_img[:,:,2].copy()
blue = obs_as_rgb_img[:,:,0].copy()
obs_as_rgb_img[:,:,0] = red
obs_as_rgb_img[:,:,2] = blue
# save original resized frame with no labels, odometry, etc
if self.save_video:
self.out_original.write(obs_as_rgb_img)
# display actions
obs_as_rgb_img = cv2.putText(obs_as_rgb_img, f'action: {action.data}', self.action_position, self.font,
self.font_scale, self.text_color, self.thickness, cv2.LINE_AA)
# display visual features
y_vision_feature_step = 0.
obs_as_rgb_img = cv2.putText(obs_as_rgb_img, 'state_classifier:', self.state_classifier_position, self.font,
self.font_scale, self.text_color, self.thickness, cv2.LINE_AA)
for key, value in state_classifier.items():
y_vision_feature_step += 1.
single_state_classifier_position = (int(0.01*self.resolution), int(0.1*self.resolution + y_vision_feature_step*self.y_vision_feature_offset))
obs_as_rgb_img = cv2.putText(obs_as_rgb_img, f' {key}: {value:.2f}', single_state_classifier_position, self.font,
self.font_scale, self.text_color, self.thickness, cv2.LINE_AA)
# display subtask
obs_as_rgb_img = cv2.putText(obs_as_rgb_img, f'subtask: {subtask}', self.subtask_text_position, self.font,
self.font_scale, self.text_color, self.thickness, cv2.LINE_AA)
# display intervention mode indicator
if self.intervention_mode:
obs_as_rgb_img = cv2.putText(obs_as_rgb_img, 'INTERVENTION MODE', (self.subtask_text_position[0]+320, self.subtask_text_position[1]), self.font,
self.font_scale, (0,0,255), self.thickness, cv2.LINE_AA)
# concatenate odometry frame
obs_as_rgb_img = np.concatenate((obs_as_rgb_img, odom_frame), axis=1)
# display image
cv2.imshow("KAIROS MineRL", obs_as_rgb_img)
key_pressed = cv2.waitKey(self.waitkey_delay)
if key_pressed != -1:
self.intervention_key = chr(key_pressed)
if self.intervention_key == 'i':
print('INTERVENTION TRIGGERED')
self.waitkey_delay = 1-self.waitkey_delay # flips between 1 and 0
self.intervention_mode = True if self.waitkey_delay==0 else False
# save frame
if self.save_video:
self.out.write(obs_as_rgb_img)
def close(self):
cv2.destroyAllWindows()
if self.save_video:
self.out.release()
self.out_original.release()
def compute_intervention_action(self):
"""
Table of Actions
[0] "attack"
[1] "back"
[2] "camera_up_down" (float, negative is UP)
[3] "camera_right_left" (float, negative is LEFT)
[4] "equip"
[5] "forward"
[6] "jump"
[7] "left"
[8] "right"
[9] "sneak"
[10] "sprint"
[11] "use"
"""
action = th.zeros(12)
# compute action based on intervention key
if self.intervention_key == 'w': # move forward
action[5] = 1
elif self.intervention_key == 's': # move backward
action[1] = 1
elif self.intervention_key == 'a': # turn left (camera)
action[3] = -10
elif self.intervention_key == 'd': # turn right (camera)
action[3] = 10
elif self.intervention_key == 'q': # turn down (camera)
action[2] = 10
elif self.intervention_key == 'e': # turn up (camera)
action[2] = -10
elif self.intervention_key == ' ': # jump forward
action[5] = 1
action[6] = 1
# equip a random food
action[4] = np.random.choice([2,12,13])
# reset key so it does not apply the same actin multiple times
self.intervention_key = None
return action
class KAIROS_StateMachine():
"""
Controls sequence of sub-tasks to follow for each environemnt.
"""
def __init__(self, env, env_name, odometry, bc_model, bc_num_classes, device):
self.env = env
self.env_name = env_name
self.odometry = odometry
self.bc_model = bc_model
self.bc_num_classes = int(bc_num_classes)
self.device = device
self._initialize_mapping()
self.subtask = 0
self.executing_multistep_subtask = False
self.bc_in_control = False
# define task
if self.env_name == "MineRLBasaltFindCaveHighRes-v0" or self.env_name == "MineRLBasaltFindCave-v0":
self.task = "FIND_CAVE"
self.setup_cave_task()
elif self.env_name == "MineRLBasaltMakeWaterfallHighRes-v0" or self.env_name == "MineRLBasaltMakeWaterfall-v0":
self.task = "MAKE_WATERFALL"
self.setup_waterfall_task()
elif self.env_name == "MineRLBasaltCreateVillageAnimalPenHighRes-v0" or self.env_name == "MineRLBasaltCreateVillageAnimalPen-v0":
self.task = "CREATE_PEN"
self.setup_pen_subtask()
elif self.env_name == "MineRLBasaltBuildVillageHouseHighRes-v0" or self.env_name == "MineRLBasaltBuildVillageHouse-v0":
self.task = "BUILD_HOUSE"
self.setup_house_subtask()
else:
raise ValueError("Invalid environment. Check environ.sh")
# global states
self.good_waterfall_view = False
# setup behavior cloning action space
self.setup_bc_actions()
# setup consensus
self.triggerred_consensus = False
self.num_consensus_steps = 50
self.consensus_steps = 0
# setup task-specific subtasks
self.allow_escape_water = True
self.setup_escape_water_subtask()
# setup tracking of open-space areas
self.num_open_spaces = 5
self.open_space_tracker = self.num_open_spaces*[0]
# setup tracking of danger_ahead
self.num_danger_aheads = 5
self.danger_ahead_tracker = self.num_danger_aheads*[0]
# setup tracking of has_animals
self.num_has_animals = 5
self.has_animals_tracker = self.num_has_animals*[0]
# setup tracking of top_of_waterfall
self.num_top_of_waterfall = 5
self.top_of_waterfall_tracker = self.num_top_of_waterfall*[0]
# keep track of vertical camera angle
self.default_camera_angle = 10 # positive is down
self.goal_camera_angle = self.default_camera_angle
# translate bc actions to original dict
def translate_bc_to_raw_actions(self, discrete_action):
# reset actions
action = th.zeros(12)
if discrete_action != (self.bc_num_classes-1): # additional noop action
# convert bc action from string format to number
bc_actions = self.bc_action_map[discrete_action]
for bc_action in bc_actions:
if bc_action[0] != 'camera':
if bc_action[0] == 'equip':
action[self.action_str_to_int[bc_action[0]]] = self.item_map[bc_action[1]]
else:
action[self.action_str_to_int[bc_action[0]]] = bc_action[1]
else:
# turn camera left/right
if bc_action[1][0] == 0:
action[3] = bc_action[1][1]
# turn camera up/down
elif bc_action[1][1] == 0:
                        action[2] = bc_action[1][0]  # vertical (pitch) component; the yaw component is zero in this branch
return action
def setup_bc_actions(self):
# initialize action shaping class used to train model
if MODEL_OP_MODE == "hybrid_navigation":
action_shaping = ActionShaping_Navigation(env=self.env.env.env.env)
elif self.task == "FIND_CAVE" and MODEL_OP_MODE == "bc_only":
action_shaping = ActionShaping_FindCave(env=self.env.env.env.env)
elif self.task == "MAKE_WATERFALL" and MODEL_OP_MODE == "bc_only":
action_shaping = ActionShaping_Waterfall(env=self.env.env.env.env)
elif self.task == "CREATE_PEN" and MODEL_OP_MODE == "bc_only":
action_shaping = ActionShaping_Animalpen(env=self.env.env.env.env)
elif self.task == "BUILD_HOUSE" and MODEL_OP_MODE == "bc_only":
action_shaping = ActionShaping_Villagehouse(env=self.env.env.env.env)
else:
action_shaping = ActionShaping_Navigation(env=self.env.env.env.env)
# setup translation from string to int
self.action_str_to_int = {
"attack": 0,
"back": 1,
"camera_up_down": 2,
"camera_right_left": 3,
"equip": 4,
"forward": 5,
"jump": 6,
"left": 7,
"right": 8,
"sneak": 9,
"sprint": 10,
"use": 11,
}
self.bc_action_map = action_shaping._actions
def subtask_find_goal(self, obs):
# reset actions
action = th.zeros(12)
if MODEL_OP_MODE == 'engineered_only':
self.bc_in_control = False
# move forward with random chance of jumps
action[5] = 1 # forward
if np.random.rand() < 0.1:
action[3] = -10 # turn camera left
elif np.random.rand() > 0.9:
action[3] = 10 # turn camera right
# randomly jump
if np.random.rand() < 0.25:
action[6] = 1 # jump
elif MODEL_OP_MODE == 'hybrid_navigation':
action = self.compute_bc_action(obs)
self.bc_in_control = True
return action
def subtask_go_to_goal(self, obs):
# reset actions
action = th.zeros(12)
if MODEL_OP_MODE == 'engineered_only':
self.bc_in_control = False
# move forward with random chance of jumps
action[5] = 1 # forward
if np.random.rand() < 0.1:
action[3] = -10 # turn camera left
elif np.random.rand() > 0.9:
action[3] = 10 # turn camera right
# randomly jump
if np.random.rand() < 0.5:
action[6] = 1 # jump
elif MODEL_OP_MODE == 'hybrid_navigation':
action = self.compute_bc_action(obs)
self.bc_in_control = True
# # check if waterfall was placed to move on to next state
# # (equipped and used water bucket)
# if self.task == 'MAKE_WATERFALL' and action[11] == 1 and action[4] == 11:
# self.reached_top = True
# self.built_waterfall = True
# else:
# self.reached_top = False
# self.built_waterfall = False
return action
def subtask_end_episode(self):
# reset actions
action = th.zeros(12)
# throw snowball
if self.task == "BUILD_HOUSE":
action[4] = 22 # equip it
else:
action[4] = 8 # equip it
action[11] = 1 # throw it
return action
def track_state_classifier(self, state_classifier):
# Keep track of open spaces
self.open_space_tracker.pop(0)
self.open_space_tracker.append(state_classifier['has_open_space'])
self.odometry.good_build_spot = True if np.mean(self.open_space_tracker)>0.75 else False
# Keep track of danger_ahead
self.danger_ahead_tracker.pop(0)
self.danger_ahead_tracker.append(state_classifier['danger_ahead'])
self.odometry.agent_swimming = True if np.mean(self.danger_ahead_tracker)>0.4 else False
# Keep track of animals
self.has_animals_tracker.pop(0)
self.has_animals_tracker.append(state_classifier['has_animals'])
self.odometry.has_animals_spot = True if np.mean(self.has_animals_tracker)>0.8 else False
# Keep track of when on top of waterfalls
self.top_of_waterfall_tracker.pop(0)
self.top_of_waterfall_tracker.append(state_classifier['at_the_top_of_a_waterfall'])
self.odometry.top_of_waterfall_spot = True if np.mean(self.top_of_waterfall_tracker)>0.8 else False
def compute_bc_action(self, obs):
# Forward pass through model
obs = th.Tensor(obs).unsqueeze(0).to(self.device)
# Note, latest model passes out logits, so a softmax is needed for probabilities
scores = self.bc_model(obs)
        probabilities = th.nn.functional.softmax(scores, dim=1)
# Into numpy
probabilities = probabilities.detach().cpu().numpy()
# Sample action according to the probabilities
discrete_action = np.random.choice(np.arange(self.bc_num_classes), p=probabilities[0])
# translate discrete action to original action space
action = self.translate_bc_to_raw_actions(discrete_action)
# make sure we have a weapon equipped
action[4] = self.item_map['stone_pickaxe'] # dont have shovel in build house task
return action
def compute_action(self, obs, state_classifier, env_step):
"""
Table of Actions
[0] "attack"
[1] "back"
[2] "camera_up_down" (float, negative is UP)
[3] "camera_right_left" (float, negative is LEFT)
[4] "equip"
[5] "forward"
[6] "jump"
[7] "left"
[8] "right"
[9] "sneak"
[10] "sprint"
[11] "use"
Table of Subtasks
0: "find_goal",
1: "go_to_goal",
2: "end_episode",
3: "climb_up",
4: "climb_down",
5: "place_waterfall",
6: "look_around",
7: "build_pen",
8: "go_to_location",
9: "lure_animals",
10: "leave_pen",
11: "infer_biome",
12: "build_house",
13: "tour_inside_house",
14: "leave_house",
"""
# track previous relevant classified states
self.track_state_classifier(state_classifier)
# Consensus is a way to look around to gather more data and make sure
# the state classifier is outputting the correct thing
action = th.zeros(12)
if self.triggerred_consensus:
action = self.step_consensus(action, state_classifier)
else:
# avoid danger
if self.subtask == 'escape_water':
action = self.step_escape_water_subtask()
return action
# execute subtasks
if self.subtask == 'build_house' and not self.house_built:
action = self.step_house_subtask()
return action
elif self.subtask == 'build_pen' and not self.pen_built:
action = self.step_pen_subtask()
return action
elif self.subtask == 'lure_animals' and self.pen_built:
action = self.step_lure_animals_subtask(obs)
return action
elif self.subtask == 'climb_up' and not self.reached_top:
action = self.step_climb_up_subtask(state_classifier)
return action
elif self.subtask == 'place_waterfall' and self.reached_top:
action = self.subtask_place_waterfall()
return action
elif self.subtask == 'go_to_picture_location':
action = self.step_go_to_picture_location()
return action
elif self.subtask == 'find_goal':
action = self.subtask_find_goal(obs)
elif self.subtask == 'go_to_goal':
action = self.subtask_go_to_goal(obs)
elif self.subtask == 'end_episode':
action = self.subtask_end_episode()
# # TODO: find object direction
# if not self.triggerred_consensus:
# self.consensus_states = {key: [] for key, value in state_classifier.items()}
# self.consensus_states['heading'] = []
# self.triggerred_consensus = True
# Make sure camera angle is at the desired angle
if not self.good_waterfall_view and not self.bc_in_control:
action = self.update_vertical_camera_angle(action)
return action
def update_subtask(self, state_classifier, env_step):
self.env_step = env_step
if not self.executing_multistep_subtask:
# PRIORITY: escape water
if self.odometry.agent_swimming and self.allow_escape_water:
self.subtask = 'escape_water'
return
if self.task == 'FIND_CAVE':
# timeout to find cave
if env_step > self.timeout_to_find_cave:
self.subtask = 'end_episode'
return
if state_classifier['inside_cave'] > 0.9:
self.subtask = 'end_episode'
return
if self.task == 'MAKE_WATERFALL':
if self.good_waterfall_view and self.built_waterfall:
self.subtask = 'end_episode'
return
if self.reached_top and self.built_waterfall:
self.subtask = 'go_to_picture_location'
self.allow_escape_water = False
return
if self.reached_top:# and not self.bc_in_control:
self.subtask = 'place_waterfall'
self.allow_escape_water = False
return
# timeout to climb and build waterfall
if env_step > self.timeout_to_build_waterfall and not self.reached_top:# and not self.bc_in_control:
self.subtask = 'climb_up'
self.found_mountain = True
self.allow_escape_water = False
# # OVERWRITE PILLAR CONSTRUCTION
# self.reached_top = True
# self.subtask = 'place_waterfall'
return
# triggers waterfall construction based on at_the_top_of_a_waterfall and facing_wall
if state_classifier['at_the_top_of_a_waterfall'] > 0.5 and self.moving_towards_mountain:# and not self.bc_in_control:
self.subtask = 'climb_up'
self.found_mountain = True
self.allow_escape_water = False
# # OVERWRITE PILLAR CONSTRUCTION
# self.reached_top = True
# self.subtask = 'place_waterfall'
return
if self.moving_towards_mountain:
self.subtask = 'go_to_goal'
return
if state_classifier['has_mountain'] > 0.95 and not self.found_mountain:
self.subtask = 'go_to_goal'
self.moving_towards_mountain = True
return
if self.task == 'CREATE_PEN':
if self.odometry.good_build_spot and not self.pen_built:
self.subtask = 'build_pen'
self.adjusted_head_angle = False
return
# timeout to start pen construction
if env_step > self.timeout_to_build_pen and not self.pen_built:
self.subtask = 'build_pen'
self.adjusted_head_angle = False
return
# lure animals after pen is built
if self.pen_built and not self.animals_lured:
self.subtask = 'lure_animals'
return
# end episode after pen is built and animals are lured
if self.pen_built and self.animals_lured:
self.subtask = 'end_episode'
return
if self.task == 'BUILD_HOUSE':
# finishes episode after house is built
if self.house_built:
self.subtask = 'end_episode'
return
if self.odometry.good_build_spot and not self.house_built:
self.subtask = 'build_house'
self.adjusted_head_angle = False
self.allow_escape_water = False
return
# timeout to start house construction
if env_step > self.timeout_to_build_house and not self.house_built:
self.subtask = 'build_house'
self.adjusted_head_angle = False
self.allow_escape_water = False
return
# default subtask: navigation
if self.task != 'MAKE_WATERFALL':
self.subtask = 'find_goal'
else:
self.subtask = 'go_to_goal'
self.moving_towards_mountain = False
if env_step > self.min_time_look_for_mountain:
self.moving_towards_mountain = True
self.found_mountain = True
def update_vertical_camera_angle(self, action):
# randomly bounces camera up (helps escaping shallow holes)
if np.random.rand() < 0.10:
self.goal_camera_angle = -15
action[6] = 1 # jump
else:
self.goal_camera_angle += 1
self.goal_camera_angle = np.clip(
self.goal_camera_angle, -15, self.default_camera_angle)
# use high camera angles to continue jumping
if self.goal_camera_angle < -10:
action[6] = 1
action[2] = self.goal_camera_angle-self.odometry.camera_angle
return action
def subtask_turn_around(self, action):
action[5] = 0 # dont move forward
action[3] = 15 # turn camera
return action
def subtask_place_waterfall(self):
print('Placing waterfall')
action = th.zeros(12)
action[2] = 50 # look down
action[4] = 11 # equip water bucket
action[11] = 1 # use it
self.built_waterfall = True
return action
def step_escape_water_subtask(self):
action = th.zeros(12)
if self.escape_water_step < self.total_escape_water_steps:
action[5] = 1 # move forward
# if np.random.rand() < 0.1:
action[6] = 1 # jump
if self.task == 'BUILD_HOUSE':
action[4] = self.item_map['stone_pickaxe'] # dont have shovel in build house task
else:
action[4] = self.item_map['stone_shovel'] # equip shovel (breaks dirt/sand blocks if we fall in the hole)
action[0] = 1 # attack
# action[11] = 1 # attack
self.goal_camera_angle = -15
self.escape_water_step += 1
self.executing_multistep_subtask = True
else:
self.goal_camera_angle = self.default_camera_angle
self.escape_water_step = 0
self.executing_multistep_subtask = False
return action
def step_go_to_picture_location(self):
action = th.zeros(12)
if self.go_to_picture_location_step < self.total_go_to_picture_location_steps:
action[5] = 1 # move forward
action[6] = 1 # jump
action[2] = -95/self.total_go_to_picture_location_steps
self.go_to_picture_location_step += 1
self.executing_multistep_subtask = True
else:
self.allow_escape_water = False # do not get scared of waterfall
if self.delay_to_take_picture_step > self.delay_to_take_picture:
# turn around to take picture
# self.goal_camera_angle = self.default_camera_angle
self.executing_multistep_subtask = False
self.good_waterfall_view = True
action[3] = 180/self.delay_to_take_picture
self.delay_to_take_picture_step += 1
return action
def step_climb_up_subtask(self, state_classifier):
# look down, place multiple blocks, jump forward, repeat
action = th.zeros(12)
if self.pillars_built < self.pillars_to_build:
# first, adjust head angle
if not self.adjusted_head_angle:
action = th.zeros(12)
action[2] = 90-self.odometry.camera_angle
self.adjusted_head_angle = True
return action
# pauses every few steps to slow down pillar construction
if self.climb_up_step % self.climb_up_step_frequency == 0:
self.climb_up_step += 1
self.executing_multistep_subtask = True
return action
if self.climb_up_step < self.total_climb_up_steps:
# if np.random.rand() < 0.1:
# action[5] = 1 # move forward
action[6] = 1 # jump
action[4] = 3 # equip block
action[11] = 1 # drop block
self.goal_camera_angle = 90
self.climb_up_step += 1
self.executing_multistep_subtask = True
else:
# # jump forward
# action[5] = 1 # move forward
# action[6] = 1 # jump
# look back
action[1] = 1 # move backward
action[3] = 180
# reset
self.goal_camera_angle = self.default_camera_angle
self.climb_up_step = 0
self.adjusted_head_angle = False
self.pillars_built += 1
else:
self.executing_multistep_subtask = False
# TODO: check if reached top
self.reached_top = True
# if state_classifier['at_the_top_of_a_waterfall'] > 0.5:
# self.reached_top = True
# else:
# # retry
# self.climb_up_step = 0
# self.pillars_built = 0
# self.adjusted_head_angle = False
return action
def step_consensus(self, action, state_classifier):
# keep track of states seen during consensus
for key, value in state_classifier.items():
self.consensus_states[key].append(value)
self.consensus_states['heading'].append(self.odometry.heading.item())
# explore around
if self.consensus_steps < self.num_consensus_steps:
# zero out all previous actions
action = th.zeros(12)
# investigate surroundings
if self.consensus_steps > 0.25*self.num_consensus_steps and \
self.consensus_steps < 0.75*self.num_consensus_steps:
action[3] = np.random.randint(low=2, high=8) # turn camera right
else:
action[3] = -np.random.randint(low=2, high=8) # turn camera left
# step counter
self.consensus_steps += 1
else:
self.consensus_steps = 0
self.triggerred_consensus = False
# # DEBUG: write to disk to better explore consensus solution
# consensus_states_df = pd.DataFrame.from_dict(self.consensus_states)
# consensus_states_df.to_csv("data/sample_consensus_data.csv")
return action
def _initialize_mapping(self):
self.mapping = {
0: "find_goal",
1: "go_to_goal",
2: "end_episode",
3: "climb_up",
4: "climb_down",
5: "place_waterfall",
6: "look_around",
7: "build_pen",
8: "go_to_location",
9: "lure_animals",
10: "leave_pen",
11: "infer_biome",
12: "build_house",
13: "tour_inside_house",
14: "leave_house",
}
self.n_subtasks = len(self.mapping.keys())
def setup_escape_water_subtask(self):
self.escape_water_step = 0
self.total_escape_water_steps = 10
def setup_waterfall_task(self):
# setup available inventory
build_waterfall_items = [
'air','bucket','carrot','cobblestone','fence','fence_gate','none','other',
'snowball','stone_pickaxe','stone_shovel','water_bucket','wheat','wheat_seeds'
]
self.item_map = {build_waterfall_items[i]: i for i in range(len(build_waterfall_items))}
self.moving_towards_mountain = False
self.found_mountain = False
self.reached_top = False
self.adjusted_head_angle = False
self.built_waterfall = False
self.go_to_picture_location_step = 0
self.total_go_to_picture_location_steps = 70
self.delay_to_take_picture = 40
self.delay_to_take_picture_step = 0
self.min_time_look_for_mountain = 1*20 # steps
self.timeout_to_build_waterfall = 90*20
self.climb_up_step_frequency = 5
self.climb_up_step = 0
self.total_climb_up_steps = 5*self.climb_up_step_frequency/2
self.pillars_to_build = 1
self.pillars_built = 0
def setup_house_subtask(self):
# setup available inventory
build_house_items = [
'acacia_door','acacia_fence','cactus','cobblestone','dirt','fence','flower_pot','glass','ladder',
'log#0','log#1','log2#0','none','other','planks#0','planks#1','planks#4','red_flower','sand',
'sandstone#0','sandstone#2','sandstone_stairs','snowball','spruce_door','spruce_fence',
'stone_axe','stone_pickaxe','stone_stairs','torch','wooden_door','wooden_pressure_plate'
]
self.item_map = {build_house_items[i]: i for i in range(len(build_house_items))}
# clone actions
self.build_house_actions = self.clone_human_actions(
dataset_addr="data/MineRLBasaltBuildVillageHouse-v0/v3_specific_quince_werewolf-1_30220-36861/rendered.npz",
start_step=70,
end_step=5519,
)
self.build_house_step = 0
self.total_build_house_steps = self.build_house_actions.shape[1]
self.house_built = False
self.timeout_to_build_house = 20*30
def step_house_subtask(self):
# first, adjust head angle
if not self.adjusted_head_angle:
action = th.zeros(12)
action[2] = 35-self.odometry.camera_angle
action[3] = -10-self.odometry.heading
self.adjusted_head_angle = True
return action
# query cloned action
# skip frames when sending 'use' action to counter delay in tool selection:
# https://minerl.readthedocs.io/en/latest/environments/handlers.html#tool-control-equip-and-use
action = th.from_numpy(self.build_house_actions[:, self.build_house_step])
self.build_house_step += 1
self.executing_multistep_subtask = True
# flags end of construction
if self.build_house_step >= self.total_build_house_steps:
self.house_built = True
self.executing_multistep_subtask = False
return action
def setup_cave_task(self):
# setup available inventory
build_cave_items = [
'air','bucket','carrot','cobblestone','fence','fence_gate','none','other','snowball',
'stone_pickaxe','stone_shovel','water_bucket','wheat','wheat_seeds'
]
self.item_map = {build_cave_items[i]: i for i in range(len(build_cave_items))}
self.timeout_to_find_cave = 170*20
def setup_pen_subtask(self):
# setup available inventory
build_pen_items = [
'air','bucket','carrot','cobblestone','fence','fence_gate','none','other','snowball',
'stone_pickaxe','stone_shovel','water_bucket','wheat','wheat_seeds',
]
self.item_map = {build_pen_items[i]: i for i in range(len(build_pen_items))}
# clone actions
self.build_pen_actions = self.clone_human_actions(
dataset_addr="data/MineRLBasaltCreateVillageAnimalPen-v0/v3_another_spinach_undead-5_28317-30684/rendered.npz",
start_step=330,
end_step=780,
)
self.build_pen_step = 0
self.total_build_pen_steps = self.build_pen_actions.shape[1]
self.pen_built = False
self.timeout_to_build_pen = 20*30
self.timeout_to_lure_animals_sec = 60
self.timeout_to_find_animals_sec = 180
self.animals_lured = False
self.animal_locations = None
self.confirmed_animal_location = False
def step_pen_subtask(self):
# first, adjust head angle
if not self.adjusted_head_angle:
action = th.zeros(12)
action[2] = 35-self.odometry.camera_angle
action[3] = -self.odometry.heading
self.adjusted_head_angle = True
# also store pen location
self.odometry.pen_location = [self.odometry.x.item(), self.odometry.y.item()]
self.odometry.pen_built_time_sec = self.odometry.t
return action
# query cloned action
# skip frames when sending 'use' action to counter delay in tool selection:
# https://minerl.readthedocs.io/en/latest/environments/handlers.html#tool-control-equip-and-use
action = th.from_numpy(self.build_pen_actions[:, self.build_pen_step])
self.build_pen_step += 1
self.executing_multistep_subtask = True
# flags end of construction
if self.build_pen_step >= self.total_build_pen_steps:
self.pen_built = True
self.executing_multistep_subtask = False
return action
def step_lure_animals_subtask(self, obs):
# keep food in hand
# 2: carrot
# 12: wheat
# 13: wheat seeds
#
# NOTE: still working on "food selection" model, equipping always wheat
# because cows and sheeps are more likely to spawn
action = th.zeros(12)
action[4] = 12
if not self.confirmed_animal_location:
# navigate to animal location (last seen)
if len(self.odometry.has_animals_spot_coords['x']) == 0:
# timeout to stop looking for animals
if self.odometry.t > self.timeout_to_find_animals_sec:
print('No luck finding animals.')
action = self.subtask_end_episode()
return action
print('No animals so far.')
# keep roaming and looking for animals
action = self.subtask_find_goal(obs)
else:
num_has_animals_spot = len(self.odometry.has_animals_spot_coords['x'])
goal_x = self.odometry.has_animals_spot_coords['x'][0]
goal_y = self.odometry.has_animals_spot_coords['y'][0]
distance, angular_diff = self.compute_stats_to_goal(goal_x, goal_y)
print(f'Last seen animal at {goal_x:.2f}, {goal_y:.2f} ({num_has_animals_spot} total)')
print(f' distance: {distance:.2f} m')
print(f' angle difference: {angular_diff:.2f} deg')
# turn camera to goal and move forward
action = self.subtask_go_to_location(action, angular_diff)
if distance < 1.0: # meters
self.confirmed_animal_location = True
# keep executing task
self.executing_multistep_subtask = True
else:
# confirmed animal location, go back to pen
# TODO: need to identify animal first to lure them
# right now, just ends the task after going back to the pen location
goal_x = self.odometry.pen_location[0]
goal_y = self.odometry.pen_location[1]
distance, angular_diff = self.compute_stats_to_goal(goal_x, goal_y)
time_after_pen_built = self.odometry.t-self.odometry.pen_built_time_sec
print(f'Pen built at {goal_x:.2f}, {goal_y:.2f}')
print(f' distance: {distance:.2f} m')
print(f' angle difference: {angular_diff:.2f} deg')
print(f' time_after_pen_built: {time_after_pen_built:.2f} sec')
# turn camera to goal and move forward
action = self.subtask_go_to_location(action, angular_diff)
# keep executing task
self.executing_multistep_subtask = True
# end episode when back to pen or if took too long
if distance < 1.0 or time_after_pen_built > self.timeout_to_lure_animals_sec:
self.animals_lured = True
self.executing_multistep_subtask = False
return action
def compute_stats_to_goal(self, goal_x, goal_y):
dist_x = goal_x-self.odometry.x.item()
dist_y = goal_y-self.odometry.y.item()
distance = np.sqrt(dist_x**2+dist_y**2)
angular_diff = self.odometry.heading.item()-np.rad2deg(np.arctan2(
self.odometry.x.item()-goal_x,
goal_y-self.odometry.y.item()))
if self.odometry.heading.item() < 0:
angular_diff += 90
else:
angular_diff -= 90
        if angular_diff >= 360.0 or angular_diff <= -360.0:
angular_diff = angular_diff % 360.0
return distance, angular_diff
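    # Worked example for the geometry above (illustrative numbers, not from a real run):
    # with the agent at (x, y) = (0, 0) and heading 0 deg, a goal at (3, 4) gives
    # distance = sqrt(3**2 + 4**2) = 5.0 m and
    # angular_diff = 0 - rad2deg(arctan2(0 - 3, 4 - 0)) - 90 ~ 36.9 - 90 ~ -53.1 deg
    # (the -90 branch applies because the heading is not negative). The caller
    # subtask_go_to_location() below then turns the camera by at most 3 deg per step
    # until this error is small enough to walk forward.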
def subtask_go_to_location(self, action, angular_diff):
# turn camera towards goal heading
cam_limit = 3.0
action[3] = np.clip(angular_diff-self.odometry.heading.item(), -cam_limit, cam_limit)
# move forward once heading is on track
        if np.abs(action[3]) < cam_limit:  # heading error small enough to walk forward
action[5] = 1
# randomly jump
if np.random.rand() < 0.25:
action[6] = 1 # jump
return action
def clone_human_actions(self, dataset_addr, start_step, end_step):
# load human data
data = dict(np.load(dataset_addr))
# substitute item names by numbers
last_equipped_item = 'none'
action_equip_num = []
for i in range(data['action$equip'].shape[0]):
# replace 'none' equip actions by last equipped item
# fixes mismatch between human collected dataset and minerl env as explained here:
# https://minerl.readthedocs.io/en/latest/environments/handlers.html#tool-control-equip-and-use
if data['action$equip'][i] == 'none':
data['action$equip'][i] = last_equipped_item
else:
last_equipped_item = data['action$equip'][i]
action_equip_num.append(self.item_map[data['action$equip'][i]])
# stack all actions
action_data = np.vstack((
data['action$attack'].astype(int),
data['action$back'].astype(int),
data['action$camera'][:,0].astype(float),
data['action$camera'][:,1].astype(float),
action_equip_num,
data['action$forward'].astype(int),
data['action$jump'].astype(int),
data['action$left'].astype(int),
data['action$right'].astype(int),
data['action$sneak'].astype(int),
data['action$sprint'].astype(int),
data['action$use'].astype(int),
))
# replay only a fraction of the demonstrations data
action_data = action_data[:, start_step:end_step]
# store original list of equipped items for debugging purposes
self.original_equip_items = data['action$equip'][start_step:end_step]
return action_data
class KAIROS_Vision():
"""
Extracts vision features from agent's POV.
"""
def __init__(self, state_classifier_model, device):
self.state_classifier = state_classifier_model
self.device = device
self.num_classes = 13
# internal count of environment steps
self.env_step_t = 0
# map state classifier index to names
self.map = {
0: 'no_labels',
1: 'has_cave',
2: 'inside_cave',
3: 'danger_ahead',
4: 'has_mountain',
5: 'facing_wall',
6: 'at_the_top_of_a_waterfall',
7: 'good_view_of_waterfall',
8: 'good_view_of_pen',
9: 'good_view_of_house',
10: 'has_animals',
11: 'has_open_space',
            12: 'animals_inside_pen',
}
def extract_features(self, obs):
# classify input state
# convert to torch and generate predictions
obs = th.Tensor(obs).unsqueeze(0).to(self.device)
state_classifier_probs = self.state_classifier(obs)
# build features dict
state_classifier = {}
for i in range(self.num_classes):
state_classifier[self.map[i]] = state_classifier_probs[0].data[i].item()
# keep track of environment steps
self.env_step_t += 1
return state_classifier
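# Illustrative sketch (not part of the original control loop): querying the vision
# module once per step. The model, device, and observation arguments are
# placeholders for whatever the caller already has.
def _vision_usage_example(state_classifier_model, device, obs):
    vision = KAIROS_Vision(state_classifier_model, device)
    state_probs = vision.extract_features(obs)  # dict of the 13 class probabilities
    # e.g. gate a water-escape behaviour on the danger_ahead probability
    return state_probs['danger_ahead'] > 0.5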
class KAIROS_Odometry():
"""
Estimates position for the agent and point of interest based on images received
and actions taken.
"""
def __init__(self, exp_id, state_classifier_map):
# initialize states
self.t, self.x, self.y = 0., 0., 0.
self.heading, self.vel, self.camera_angle = 0., 0., 0.
# minecraft motion constants
self.fps = 20
self.dt = 1/self.fps
self.swim_vel = 2.2 # m/s (surface)
self.walk_vel = 4.317 # m/s
self.sprint_vel_bonus = 5.612-self.walk_vel # m/s
self.sprint_jump_vel_bonus = 7.127-self.walk_vel # m/s
self.animal_range = 6. # meters
self.detection_range = 10. # meters (crude estimate)
self.map_resolution = 10 # scales pixels for odometry frame (higher, more precise)
self.pen_location = [0,0]
self.pen_built_time_sec = 0
# save logs to disk
self.exp_id = exp_id
self.state_classifier_map = state_classifier_map
self.odometry_log = {
'env_step': [0],
't': [self.t],
'x': [self.x],
'y': [self.y],
'heading': [self.heading],
'vel': [self.vel],
'camera_angle': [self.camera_angle],
}
# add all classified states to log
for i in range(len(self.state_classifier_map.keys())):
self.odometry_log[self.state_classifier_map[i]] = [0.]
self.actions_log = [np.zeros(12)]
# colors to display features
self.agent_color=(0,0,255)
self.danger_ahead_color=(255,0,0)
self.has_animals_color=(212,170,255)
self.has_open_space_color=(86,255,86)
self.has_cave_color=(34,48,116)
self.at_the_top_of_a_waterfall_color=(228,245,93)
# keep track of good build spots and if swimming
self.good_build_spot = False
self.agent_swimming = False
self.has_animals_spot = False
self.top_of_waterfall_spot = False
self.has_animals_spot_coords = {
't': [], 'x': [], 'y': [], 'verified': []
}
def update(self, action, env_step, state_classifier):
# compute current velocity
# [0] "attack"
# [1] "back"
# [2] "camera_up_down" (float)
# [3] "camera_right_left" (float)
# [4] "equip"
# [5] "forward"
# [6] "jump"
# [7] "left"
# [8] "right"
# [9] "sneak"
# [10] "sprint"
# [11] "use"
if action[6]: # jumping
self.vel = (action[5]-action[1])*(self.walk_vel+self.sprint_jump_vel_bonus*action[10])
else:
self.vel = (action[5]-action[1])*(self.walk_vel+self.sprint_vel_bonus*action[10])
# update heading based on camera movement
self.heading += action[3]
if self.heading >= 360.0 or self.heading <= -360.0:
self.heading = self.heading % 360.0
self.camera_angle += action[2]
# update position based on estimated velocity
self.t += self.dt
self.x += self.vel*np.cos(np.deg2rad(self.heading))*self.dt
self.y += self.vel*np.sin(np.deg2rad(self.heading))*self.dt
# add states identified by the state classifier to the odometry logs
for i in range(len(self.state_classifier_map.keys())):
self.odometry_log[self.state_classifier_map[i]].append(state_classifier[self.state_classifier_map[i]])
# update logs
self.odometry_log['env_step'].append(env_step)
self.odometry_log['t'].append(self.t)
self.odometry_log['x'].append(self.x.item())
self.odometry_log['y'].append(self.y.item())
self.odometry_log['heading'].append(self.heading)
self.odometry_log['camera_angle'].append(self.camera_angle)
self.odometry_log['vel'].append(self.vel.item())
self.actions_log.append(action.numpy().flatten())
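    # Worked example of the dead-reckoning update above (illustrative numbers): walking
    # straight (forward=1, no sprint/jump) gives vel = 4.317 m/s; with heading = 30 deg
    # and dt = 1/20 s the position advances by dx = 4.317*cos(30 deg)/20 ~ 0.187 m and
    # dy = 4.317*sin(30 deg)/20 ~ 0.108 m per environment step, while the heading and
    # camera angle simply accumulate the commanded camera deltas.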
# Convert coordinates to pixel value to display in odometry map
def coord_to_pixel(self, x, y):
x_pos = int(self.map_resolution*(x+self.min_x))
y_pos = int(self.map_resolution*(y+self.min_y))
return x_pos, y_pos
def tag_relevant_state(self, odom_frame, state_name, color, radius, confidence=0.65, fill=1):
states = np.array(self.odometry_log[state_name])
relevant_states = np.argwhere(states > confidence)
for i in range(relevant_states.shape[0]):
idx = relevant_states[i][0]
x_pos, y_pos = self.coord_to_pixel(
x=self.odometry_log['x'][idx],
y=self.odometry_log['y'][idx])
odom_frame = cv2.circle(odom_frame, (y_pos, x_pos), radius, color, fill)
def generate_frame(self):
# Keep track of image bounds
all_xs = np.array(self.odometry_log['x'])
all_ys = np.array(self.odometry_log['y'])
self.min_x = np.abs(all_xs.min())
self.min_y = np.abs(all_ys.min())
# Convert odometry to pixels
x = (self.map_resolution*(all_xs+self.min_x)).astype(int)
y = (self.map_resolution*(all_ys+self.min_y)).astype(int)
# Setup odometry image with maximum x or y dimension
max_coord = max(x.max(), y.max())
odom_frame = np.zeros((max_coord+1, max_coord+1, 3), np.uint8)
# Substitute coordinates as white pixels
odom_frame[x, y] = 255
# Add circle to current robot position
x_pos = x[-1]
y_pos = y[-1]
odom_frame = cv2.circle(odom_frame, (y_pos, x_pos), 5, self.agent_color, -1)
# Add markers to relevant classified states
self.tag_relevant_state(odom_frame, state_name='has_open_space',
color=self.has_open_space_color, radius=35)
self.tag_relevant_state(odom_frame, state_name='danger_ahead',
color=self.danger_ahead_color, radius=15, fill=3)
self.tag_relevant_state(odom_frame, state_name='has_animals',
color=self.has_animals_color, radius=5, fill=-1, confidence=0.75)
self.tag_relevant_state(odom_frame, state_name='has_cave',
color=self.has_cave_color, radius=15, fill=-1, confidence=0.75)
self.tag_relevant_state(odom_frame, state_name='at_the_top_of_a_waterfall',
color=self.at_the_top_of_a_waterfall_color, radius=15, fill=-1, confidence=0.75)
# Make sure image always has the same size
odom_frame = cv2.resize(odom_frame, (512, 512), interpolation=cv2.INTER_LINEAR)
# Add text with odometry info
font = cv2.FONT_HERSHEY_SIMPLEX
thickness = 1
font_scale = 0.5
text_color = (255, 255, 255)
# left column text
odom_frame = cv2.putText(odom_frame, f'x: {self.x:.2f} m', (10, 20), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, f'y: {self.y:.2f} m', (10, 40), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, f'heading: {self.heading:.2f} deg', (10, 60), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, f'camera_angle: {self.camera_angle:.2f} deg', (10, 80), font,
font_scale, text_color, thickness, cv2.LINE_AA)
if self.good_build_spot:
odom_frame = cv2.putText(odom_frame, 'GOOD BUILD SPOT', (10, 120), font,
font_scale, (0,255,0), thickness, cv2.LINE_AA)
if self.agent_swimming:
odom_frame = cv2.putText(odom_frame, 'AGENT SWIMMING', (10, 140), font,
font_scale, (0,0,255), thickness, cv2.LINE_AA)
if self.has_animals_spot:
odom_frame = cv2.putText(odom_frame, 'SPOT HAS ANIMALS', (10, 160), font,
font_scale, self.has_animals_color, thickness, cv2.LINE_AA)
self.has_animals_spot_coords['t'].append(self.t)
self.has_animals_spot_coords['x'].append(self.x.item())
self.has_animals_spot_coords['y'].append(self.y.item())
self.has_animals_spot_coords['verified'].append(False)
if self.top_of_waterfall_spot:
odom_frame = cv2.putText(odom_frame, 'GOOD SPOT FOR WATERFALL', (10, 180), font,
font_scale, self.at_the_top_of_a_waterfall_color, thickness, cv2.LINE_AA)
# right column text
odom_frame = cv2.putText(odom_frame, f'time: {self.t:.2f} sec', (352, 20), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'legend:', (352, 60), font,
font_scale, text_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'agent', (372, 80), font,
font_scale, self.agent_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'danger_ahead', (372, 100), font,
font_scale, self.danger_ahead_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'has_animals', (372, 120), font,
font_scale, self.has_animals_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'has_open_space', (372, 140), font,
font_scale, self.has_open_space_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'has_cave', (372, 160), font,
font_scale, self.has_cave_color, thickness, cv2.LINE_AA)
odom_frame = cv2.putText(odom_frame, 'top_of_waterfall', (372, 180), font,
font_scale, self.at_the_top_of_a_waterfall_color, thickness, cv2.LINE_AA)
return odom_frame
def close(self):
# save logs to disk
os.makedirs('train/odometry', exist_ok=True)
odometry_log_df = pd.DataFrame.from_dict(self.odometry_log)
        action_log_df = pd.DataFrame(self.actions_log, columns=[
            'attack', 'back', 'camera_up_down', 'camera_right_left', 'equip', 'forward',
            'jump', 'left', 'right', 'sneak', 'sprint', 'use'])
        log_df = pd.concat([odometry_log_df, action_log_df], axis=1)
import json
import os
import pandas
from tools.dataset_tool import dfs_search
data_path = "../input/"
recursive = False
file_list = []
file_list = file_list + dfs_search(os.path.join(data_path, ''), recursive)
file_list = [file for file in file_list if 'train' in file]
file_list.sort()
rawinput = []
for filename in file_list:
f = open(filename, "r", encoding='utf8')
for line in f:
data = json.loads(line)
# filter dataset for Single option model and Multiple option model.
# clean up answers.
data["answer"] = [a for a in data["answer"] if a != "。"]
rawinput.append(json.loads(line))
df = pandas.DataFrame(columns=["q","a","r"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 15:48:30 2020
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
from ventiliser.BreathVariables import BreathVariables
class Evaluation:
"""
Class to help visualise and evaluate breaths extracted from a record.
Attributes
----------
pressures : Array like of real
Pressure data points for a record
flows : Array like of real
Flow data points for a record
breaths : Array like of BreathVariables
Breaths as calculating using mapper and phaselabeller
freq : real
Frequency in Hz of the recording sampling rate
"""
@staticmethod
def load_breaths_from_csv(path):
"""
Utility method to load a csv file output from PhaseLabeller.get_breaths_raw to a list of BreathVariables
Parameters
----------
path : string
Path to the raw breaths file
Returns
-------
array like of BreathVariables objects
"""
csv = pd.read_csv(path)
breaths = csv.apply(Evaluation.__breathvariables_from_row, axis=1)
return breaths
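    # Illustrative usage (the file path, sampling rate, and attribute name below are
    # assumptions, not values from the original project):
    #   breaths = Evaluation.load_breaths_from_csv('breaths_raw.csv')
    #   ev = Evaluation(pressures, flows, list(breaths), freq=100)
    #   matches = ev.compare(labels=[1200, 4500], breath_attr='breath_start')
    # where 'breath_start' stands in for whichever BreathVariables field the manual
    # annotations refer to.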
@staticmethod
def __breathvariables_from_row(x):
"""
Helper method to apply on pandas dataframe
Parameters
----------
x : Pandas Series
A row from a pandas dataframe containing breaths
Returns
-------
BreathVariables object
"""
output = BreathVariables()
for attr in x.index:
setattr(output, attr, int(x[attr]))
return output
def __init__(self, pressures, flows, breaths, freq):
"""
Initialises the evaluation object with the data, predicted breaths, and frequency of the record
Parameters
----------
pressures : Array like of real
Pressure data points for a record
flows : Array like of real
Flow data points for a record
breaths : Array like of BreathVariables
Breaths as calculating using mapper and phaselabeller
freq : real
Frequency in Hz of the recording sampling rate
Returns
-------
None
"""
self.pressures = np.array(pressures)
self.flows = np.array(flows)
self.breaths = breaths
self.freq = freq
def compare(self, labels, breath_attr):
"""
Compares an attribute from the currently loaded breaths with a list of values. Identifies the label which is the closest match to each breath.
Parameters
----------
labels : array like of int
A list of indices that you wish to compare with the currently loaded breaths
breath_attr : string
A BreathVariables attribute you wish to perform the comparison on
Returns
-------
Pandas Dataframe
A dataframe containing the closest matching breath to the given labels based on the attribute along with the difference
"""
if self.breaths is None:
print("No breaths to compare to")
return
labels = np.array(labels)
output = []
for breath in self.breaths:
delta = abs(labels - getattr(breath, breath_attr))
best = np.argmin(np.array(delta))
output += [{"breath_index" : self.breaths.index(breath), "label_index" : best, "delta" : delta[best]}]
        return pd.DataFrame(output)
#!/usr/bin/python3
import json
from SPARQLWrapper import SPARQLWrapper, POST
import psycopg2
import pandas
def main():
conn = psycopg2.connect(database='htsworkflow', host='felcat.caltech.edu')
#total = 0
#total += display_subclass_tree('http://purl.obolibrary.org/obo/UBERON_0001134', conn=conn)
#total += display_subclass_tree('http://purl.obolibrary.org/obo/UBERON_0001015', conn=conn)
#total += display_subclass_tree('http://purl.obolibrary.org/obo/UBERON_0011906', conn=conn)
#total += display_subclass_tree('http://purl.obolibrary.org/obo/UBERON_0014892', conn=conn)
#total += display_subclass_tree('http://purl.obolibrary.org/obo/CL_0000187', conn=conn)
#total += display_subclass_tree('http://purl.obolibrary.org/obo/CL_0000188', conn=conn)
#print('Found {} biosaples'.format(total))
tables = []
#tables.extend(find_biosamples('http://purl.obolibrary.org/obo/UBERON_0001134', conn=conn))
#tables.extend(find_biosamples('http://purl.obolibrary.org/obo/UBERON_0001015', conn=conn))
#tables.extend(find_biosamples('http://purl.obolibrary.org/obo/UBERON_0011906', conn=conn))
#tables.extend(find_biosamples('http://purl.obolibrary.org/obo/UBERON_0014892', conn=conn))
tables.extend(find_biosamples('http://purl.obolibrary.org/obo/CL_0000187', conn=conn))
tables.extend(find_biosamples('http://purl.obolibrary.org/obo/CL_0000515', conn=conn))
    df = pandas.DataFrame(tables)
import pandas as pd
import numpy as np
from pathlib import Path
from datetime import datetime as dt
def mergeManagers(managers, gameLogs):
    # Sum up duplicated manager rows
    managers = managers.groupby(['yearID','playerID'], as_index=False)[['Games','Wins','Losses']].sum()
#Get visiting managers
visitingManagers = gameLogs[['row','Date','Visiting team manager ID']]
visitingManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(visitingManagers['Date'])).year-1
visitingManagers = pd.merge(visitingManagers, managers, left_on=['yearID','Visiting team manager ID'], right_on=['yearID','playerID'], how="left")
#Get home managers
homeManagers = gameLogs[['row','Date','Home team manager ID']]
    homeManagers['yearID'] = pd.DatetimeIndex(pd.to_datetime(homeManagers['Date'])).year-1
import pandas as pd
import networkx as nx
import os
import csv
import matplotlib.pyplot as plt
from networkx.algorithms.community import k_clique_communities
import pygraphviz
from networkx.drawing.nx_agraph import graphviz_layout
from itertools import groupby
import numpy as np
from nxviz import CircosPlot
from nxviz.plots import ArcPlot, MatrixPlot
import yaml
# ## Connectivity between countries
# We need to compute connectivity of coauthors from these countries
# List of official countries names
with open('./countries.yaml', 'r') as f:
    countries = list(yaml.load(f, Loader=yaml.SafeLoader)['countries'].values())
countries.extend(['Moldova','South Korea'])
def get_country_name(data):
computed = ''
for name in data.split(' '):
computed = ("{} {}".format(computed, name)).strip()
if computed in countries:
return computed
else:
continue
return data
# # Get an network of a specified ego
def get_network(id):
nodes = []
edges = []
node_file, edge_file = ('networks/' + str(id) + '_nodes.csv', 'networks/' + str(id) + '_edges.csv')
if os.path.exists(node_file) and os.path.exists(edge_file):
with open(node_file, 'r') as reader:
csvreader = csv.DictReader(reader, dialect='excel')
for row in csvreader:
# TODO: Normalize attributes (country especially)
row['Country'] = get_country_name(row['Country'])
nodes.append(row)
with open(edge_file, 'r') as reader:
csvreader = csv.DictReader(reader, dialect='excel')
for row in csvreader:
edges.append(row)
return nodes,edges
# ## Build network of a specifiecd ego
# Network will be multilayered by years of collaboration between actors (authors)
def load_network(id, nodes, edges):
graph = nx.MultiGraph()
for node in nodes:
graph.add_node(node['Id'], label=node['Name'], university=node['University'], country=node['Country'], citations=int(node['Citations']))
for edge in edges:
graph.add_edge(edge['Source'], edge['Target'], weight=int(edge['Weight']), year=edge['Year'])
try:
graph.remove_node(id)
    except nx.NetworkXError:
        # the ego node may already be absent from the exported network
print('{} is not present in network'.format(id))
return graph
def draw_graph(graph):
# pos = graphviz_layout(graph, prog='twopi', args='')
# plt.figure(figsize=(8, 8))
# nx.draw(graph, pos, node_size=20, alpha=0.5, node_color="blue", with_labels=False)
# plt.axis('equal')
# plt.show()
# options = {
# 'node_color': 'black',
# 'node_size': 50,
# 'line_color': 'grey',
# 'linewidths': 0,
# 'width': 0.1,
# }
# nx.draw(graph, **options)
# plt.show()
# Assume we have a professional network of physicians belonging to hospitals.
# c = CircosPlot(graph, node_color='university', node_grouping='university')
c = ArcPlot(graph, node_color="country", node_grouping="university", group_order="alphabetically")
c.draw()
plt.show() # only needed in scripts
def analyze_graph(graph):
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.triangles.html
# Triangles per nodes, we should analyse the average per graph
triangles = np.average(list(nx.triangles(graph).values()))
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.transitivity.html
transitivity = nx.transitivity(graph)
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.clustering.html
# clustering = nx.clustering(graph, weight='weight').values()
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.cluster.average_clustering.html
average_clustering = nx.average_clustering(graph, weight='weight', count_zeros=False)
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.bipartite.centrality.closeness_centrality.html
closeness = nx.closeness_centrality(graph).values()
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.bipartite.centrality.betweenness_centrality.html
betweenness = nx.betweenness_centrality(graph).values()
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.assortativity.degree_assortativity_coefficient.html
homophily = nx.degree_assortativity_coefficient(graph, weight='weight')
# https://networkx.github.io/documentation/networkx-1.9.1/reference/generated/networkx.algorithms.assortativity.attribute_assortativity_coefficient.html
# Homophily by citations
homophily_citations = nx.attribute_assortativity_coefficient(graph, 'citations')
# Homophily by university
homophily_university = nx.attribute_assortativity_coefficient(graph, 'university')
return {
'triangles': np.round(triangles, 2),
'transitivity': transitivity,
# 'clustering': clustering,
'average_clustering': average_clustering,
'closeness': list(closeness),
'betweenness': list(betweenness),
'homophily': homophily,
'homophily_citations': homophily_citations,
'homophily_university': homophily_university
}
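# Minimal sketch of exercising analyze_graph on a toy network; the node attributes
# and weights below are made up purely so every metric has something to work on,
# and a plain nx.Graph is used to keep the clustering calls well-defined.
def _analyze_graph_example():
    toy = nx.Graph()
    toy.add_node('a', university='U1', country='X', citations=10)
    toy.add_node('b', university='U1', country='X', citations=10)
    toy.add_node('c', university='U2', country='Y', citations=3)
    toy.add_node('d', university='U2', country='Y', citations=3)
    toy.add_edge('a', 'b', weight=2)
    toy.add_edge('a', 'c', weight=1)
    toy.add_edge('b', 'c', weight=1)
    toy.add_edge('c', 'd', weight=1)
    return analyze_graph(toy)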
def build_analysis():
    n_list = pd.read_csv('phys_networks.csv', usecols=[0])
# coding: utf-8
import numpy as np
import pandas as pd
import mplleaflet
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# from matplotlib.ticker import FixedLocator, LinearLocator, FormatStrFormatter
# import datetime
# Import data
df_GHCN = pd.read_csv('fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')
import datetime as dt
import pandas as pd
from .. import AShareDataReader, DateUtils, DBInterface, utils
from ..config import get_db_interface
class IndustryComparison(object):
def __init__(self, index: str, industry_provider: str, industry_level: int, db_interface: DBInterface = None):
if not db_interface:
db_interface = get_db_interface()
self.data_reader = AShareDataReader(db_interface)
self.industry_info = self.data_reader.industry(industry_provider, industry_level)
self.index = index
def holding_comparison(self, holding: pd.Series):
holding_ratio = self._holding_to_ratio(holding)
return self.industry_ratio_comparison(holding_ratio)
def industry_ratio_comparison(self, holding_ratio: pd.Series):
date = holding_ratio.index.get_level_values('DateTime').unique()[0]
industry_info = self.industry_info.get_data(dates=date).stack()
industry_info.name = 'industry'
index_comp = self.data_reader.index_constitute.get_data(index_ticker=self.index, date=date)
holding_industry = self._industry_ratio(holding_ratio, industry_info) * 100
index_industry = self._industry_ratio(index_comp, industry_info)
        diff_df = pd.concat([holding_industry, index_industry], axis=1, sort=True)
import pandas as pd
import pickle
from sklearn.linear_model import Lasso
from xgboost import XGBRegressor
from sklearn.ensemble import RandomForestRegressor as RFR
from hyperopt import hp, fmin, tpe, STATUS_OK
from sklearn.model_selection import cross_val_score
def lasso_regression(X_train, y_train, X_test, y_test, y_train_scaled, target_mean, target_std):
'''
:param X_train: Input Feature data for Train
:param y_train: Output feature for Train (Density)
:param X_test: Input Feature data for Test
:param y_test: Output feature for Test (Density)
:param y_train_scaled: Scaled output for Train (Scaled Density)
:param target_mean: Mean of output feature (Density)
:param target_std: Standard Deviation of output feature (Density)
:return: Dumps the Actual v/s Predicted Values and LASSO Coefficients in csv
'''
lasso_model = Lasso(alpha=0.1)
lasso_model.fit(X_train.values, y_train_scaled.values)
with open('model_objects/chemml_lasso.pickle', 'wb') as handle:
pickle.dump(lasso_model, handle, protocol=pickle.HIGHEST_PROTOCOL)
y_train_predicted = [(_ * target_std) + target_mean for _ in list(lasso_model.predict(X_train))]
y_test_predicted = [(_ * target_std) + target_mean for _ in list(lasso_model.predict(X_test))]
df_train_lasso = pd.concat([y_train, pd.DataFrame({'predicted_density': y_train_predicted})], ignore_index=False,
axis=1)
df_test_lasso = pd.concat([y_test, pd.DataFrame({'predicted_density': y_test_predicted})], ignore_index=False,
axis=1)
df_train_lasso.to_csv('data/df_train_actual_vs_predicted_lasso.csv', index=False)
df_test_lasso.to_csv('data/df_test_actual_vs_predicted_lasso.csv', index=False)
df_lasso_coeffs = pd.DataFrame({'feature': list(X_train.columns), 'coefficients': list(lasso_model.coef_)})
df_lasso_coeffs['abs_'] = df_lasso_coeffs.coefficients.abs()
df_lasso_coeffs.sort_values(by='abs_', ascending=False, inplace=True)
df_lasso_coeffs.index = range(len(df_lasso_coeffs))
df_lasso_coeffs.drop(columns='abs_', axis=1, inplace=True)
df_lasso_coeffs.to_csv('data/df_lasso_coefficients.csv', index=False)
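# Usage sketch (the file path, column name and train_test_split call below are assumptions,
# not part of this module):
# df = pd.read_csv('data/chemml_features.csv')
# X_train, X_test, y_train, y_test = train_test_split(df.drop(columns='density'), df[['density']])
# target_mean, target_std = float(y_train['density'].mean()), float(y_train['density'].std())
# y_train_scaled = (y_train - target_mean) / target_std
# lasso_regression(X_train, y_train, X_test, y_test, y_train_scaled, target_mean, target_std)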
def objective_fn_rfr(params, X_train, y_train_scaled):
'''
:param params: Hyper-parameter Grid
:param X_train: Input Feature data for Train
:param y_train_scaled: Scaled output for Train (Scaled Density)
:return: Score value
'''
global it_, scores_
params = {
'max_depth': int(params['max_depth']),
'n_estimators': int(params['n_estimators']),
'min_samples_split': params['min_samples_split'],
'min_samples_leaf': params['min_samples_leaf'],
'max_features': params['max_features'],
'oob_score': params['oob_score'],
'max_samples': params['max_samples']
}
clf = RFR(n_jobs=3, **params)
it_ = it_ + 1
score = cross_val_score(clf, X_train.values, y_train_scaled.values.ravel(), scoring='neg_root_mean_squared_error',
cv=4).mean()
with open("logs_rf.txt", "a") as myfile:
myfile.write('------------------- On {} ------------------\n'.format(it_))
myfile.write('Params : {}\n'.format(params))
myfile.write('RMSE : {}\n'.format(-score))
return {'loss': 1 - score, 'status': STATUS_OK}
def objective_fn_xgb(params, X_train, y_train_scaled):
'''
:param params: Hyper-parameter Grid
:param X_train: Input Feature data for Train
:param y_train_scaled: Scaled output for Train (Scaled Density)
:return: Score value
'''
global it_, scores_
params = {
'max_depth': int(params['max_depth']),
'n_estimators': int(params['n_estimators']),
'reg_alpha': params['reg_alpha'],
'reg_lambda': params['reg_lambda']
}
clf = XGBRegressor(learning_rate=0.01, n_jobs=3, **params)
it_ = it_ + 1
score = cross_val_score(clf, X_train.values, y_train_scaled.values, scoring='neg_root_mean_squared_error',
cv=4).mean()
with open("logs_xgb.txt", "a") as myfile:
myfile.write('------------------- On {} ------------------\n'.format(it_))
myfile.write('Params : {}\n'.format(params))
myfile.write('RMSE : {}\n'.format(-score))
return {'loss': 1 - score, 'status': STATUS_OK}
def xgb_model(X_train, y_train_scaled):
    '''
    :param X_train: Input Feature data for Train
    :param y_train_scaled: Scaled output for Train (Scaled Density)
    :return: Minimized Loss Function
    '''
    space = {
        'max_depth': hp.choice('max_depth', [5, 7, 10]),
        'n_estimators': hp.choice('n_estimators', [50, 100, 150, 200]),
        'reg_alpha': hp.choice('reg_alpha', [0.01, 0.1, 0.5, 1]),
        'reg_lambda': hp.choice('reg_lambda', [0.01, 0.1, 0.5, 1])}
    # hyperopt calls the objective with the sampled params only, so bind the data here
    return fmin(fn=lambda params: objective_fn_xgb(params, X_train, y_train_scaled),
                space=space, algo=tpe.suggest, max_evals=800)
def random_forest_model(X_train, y_train_scaled):
    '''
    :param X_train: Input Feature data for Train
    :param y_train_scaled: Scaled output for Train (Scaled Density)
    :return: Minimized Loss Function
    '''
    space = {
        'max_depth': hp.choice('max_depth', [5, 7, 10]),
        'n_estimators': hp.choice('n_estimators', [50, 125, 200]),
        'min_samples_split': hp.choice('min_samples_split', [2, 4, 6]),
        'min_samples_leaf': hp.choice('min_samples_leaf', [1, 3, 5]),
        'max_features': hp.choice('max_features', ['auto', 'sqrt', 'log2']),
        'oob_score': hp.choice('oob_score', [True, False]),
        'max_samples': hp.choice('max_samples', [100, 150, 200])}
    # hyperopt calls the objective with the sampled params only, so bind the data here
    return fmin(fn=lambda params: objective_fn_rfr(params, X_train, y_train_scaled),
                space=space, algo=tpe.suggest, max_evals=1500)
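# Usage sketch: the tuned searches assume the iteration counter ``it_`` is initialised elsewhere
# in the pipeline before fmin starts logging (an assumption based on the ``global it_`` usage):
# it_ = 0
# best_xgb_params = xgb_model(X_train, y_train_scaled)
# best_rfr_params = random_forest_model(X_train, y_train_scaled)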
def dump_xgboost_model(X_train, y_train_scaled):
'''
:param X_train: Input Feature data for Train
:param y_train_scaled: Scaled output for Train (Scaled Density)
:return: Dumps XGBOOST trained model
'''
params = {'learning_rate': 0.01, 'max_depth': 5, 'n_estimators': 200, 'reg_alpha': 0.01, 'reg_lambda': 0.01}
model_ = XGBRegressor(**params)
model_.fit(X_train, y_train_scaled, verbose=True)
with open('model_objects/chemml_xgboost.pickle', 'wb') as handle:
pickle.dump(model_, handle, protocol=pickle.HIGHEST_PROTOCOL)
def dump_random_forest_model(X_train, y_train_scaled):
'''
:param X_train: Input Feature data for Train
:param y_train_scaled: Scaled output for Train (Scaled Density)
:return: Dumps Random Forest trained model
'''
params = {'max_depth': 10, 'n_estimators': 50, 'min_samples_split': 2, 'min_samples_leaf': 1,
'max_features': 'auto', 'oob_score': True, 'max_samples': 200}
model_ = RFR(**params)
model_.fit(X_train.values, y_train_scaled.values.ravel())
with open('model_objects/chemml_rfr.pickle', 'wb') as handle:
pickle.dump(model_, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_and_predict(model_object_path, target_mean, target_std, X_train, X_test, y_train, y_test):
'''
:param model_object_path: Path to model object pickle
:param target_mean: Mean of output feature (Density)
:param target_std: Standard Deviation of output feature (Density)
:param X_train: Input Feature data for Train
:param X_test: Input Feature data for Test
:param y_train: Output feature for Train (Density)
:param y_test: Output feature for Test (Density)
:return: Actual v/s Predicted values for Train & Test
'''
with open(model_object_path, 'rb') as input_file:
model_obj = pickle.load(input_file)
y_train_predicted = [(_ * target_std) + target_mean for _ in list(model_obj.predict(X_train))]
y_test_predicted = [(_ * target_std) + target_mean for _ in list(model_obj.predict(X_test))]
    df_train_xgb = pd.concat([y_train, pd.DataFrame({'predicted_density': y_train_predicted})], ignore_index=False,
                             axis=1)
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
from train import train, loss_func, test
from model import NN, CNN
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.tree import DecisionTreeRegressor
from densratio import densratio
from pykliep import DensityRatioEstimator
import xgboost as xgb
file_names = ['books_processed_balanced',
'dvd_processed_balanced',
'electronics_processed_balanced',
'kitchen_processed_balanced']
def calc_result(reg, x0, y0, x1, y1, dr=None):
reg.fit(x0, y0, sample_weight=dr)
train_loss = np.mean((y0 - reg.predict(x0))**2)
test_loss = np.mean((y1 - reg.predict(x1))**2)
rating_temp = y1.copy()
rating_temp[rating_temp >= 3] = 100
auc = calc_auc(rating_temp, reg.predict(x1))
return train_loss, test_loss, auc
def calc_auc(y, f):
fpr, tpr, _ = metrics.roc_curve(y, f, pos_label=100)
auc = metrics.auc(fpr, tpr)
return 1-auc
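# Minimal usage sketch (variable names are placeholders): fit a plain Ridge model on the source
# domain, optionally importance-weighted by density-ratio estimates ``dr`` for covariate shift.
def _example_calc_result(x0, y0, x1, y1, dr=None):
    reg = Ridge(alpha=1.0)
    # calc_result returns (train MSE, test MSE, 1 - AUC for the rating >= 3 threshold)
    return calc_result(reg, x0, y0, x1, y1, dr=dr)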
def main():
ite = 10
num_train_data = 2000
num_test_data = 2000
Net = NN
model_num = 3
learning_rate = 1e-4
epoch = 200
batchsize = 256
seed = 2020
for f_name_idx0 in range(len(file_names)):
for f_name_idx1 in range(f_name_idx0+1, len(file_names)):
train_loss_normal = np.zeros((ite, model_num))
test_loss_normal = np.zeros((ite, model_num))
auc_normal = np.zeros((ite, model_num))
train_loss_kerulsif = np.zeros((ite, model_num))
test_loss_kerulsif = np.zeros((ite, model_num))
auc_kerulsif = np.zeros((ite, model_num))
train_loss_kerkleip = np.zeros((ite, model_num))
test_loss_kerkleip = np.zeros((ite, model_num))
auc_kerkleip = np.zeros((ite, model_num))
train_loss_pu = np.zeros((ite, model_num))
test_loss_pu = np.zeros((ite, model_num))
auc_pu = np.zeros((ite, model_num))
train_loss_ulsif = np.zeros((ite, model_num))
test_loss_ulsif = np.zeros((ite, model_num))
auc_ulsif = np.zeros((ite, model_num))
train_loss_nnpu = np.zeros((ite, model_num))
test_loss_nnpu = np.zeros((ite, model_num))
auc_nnpu = np.zeros((ite, model_num))
train_loss_nnulsif = np.zeros((ite, model_num))
test_loss_nnulsif = np.zeros((ite, model_num))
auc_nnulsif = np.zeros((ite, model_num))
f_name0 = file_names[f_name_idx0]
f_name1 = file_names[f_name_idx1]
for i in range(ite):
np.random.seed(seed)
if f_name0 != f_name1:
                    data0 = pd.read_csv('dataset/%s.csv'%f_name0)
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query,
partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_without_partition_range(postgres_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="Int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(
["a", "c"], dtype="object"
),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series(
[None, None], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_selection(postgres_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_projection(postgres_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_join(postgres_url: str) -> None:
query = "SELECT T.test_int, T.test_bool, S.test_language FROM test_table T INNER JOIN test_str S ON T.test_int = S.id"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(5),
data={
"test_int": pd.Series([0, 1, 2, 3, 4], dtype="Int64"),
"test_bool": pd.Series([None, True, False, False, None], dtype="boolean"),
"test_language": pd.Series(
["English", "中文", "日本語", "русский", "Emoji"], dtype="object"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_spja(postgres_url: str) -> None:
query = "select test_bool, AVG(test_float) as avg, SUM(test_int) as sum from test_table as a, test_str as b where a.test_int = b.id AND test_nullint is not NULL GROUP BY test_bool ORDER BY sum"
df = read_sql(postgres_url, query,
partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_on_utf8(postgres_url: str) -> None:
query = "SELECT * FROM test_str"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(8),
data={
"id": pd.Series([0, 1, 2, 3, 4, 5, 6, 7], dtype="Int64"),
"test_language": pd.Series(
["English", "中文", "日本語", "русский", "Emoji", "Latin1", "Extra", "Mixed"], dtype="object"
),
"test_hello": pd.Series(
["Hello", "你好", "こんにちは", "Здра́вствуйте", "😁😂😜", "¥§¤®ð", "y̆", "Ha好ち😁ðy̆"], dtype="object"
),
},
)
    assert_frame_equal(df, expected, check_names=True)
"""
Contains various methods used by Corpus components
"""
import pandas as pd
def get_utterances_dataframe(obj, selector = lambda utt: True,
exclude_meta: bool = False):
"""
Get a DataFrame of the utterances of a given object with fields and metadata attributes,
with an optional selector that filters for utterances that should be included.
Edits to the DataFrame do not change the corpus in any way.
:param exclude_meta: whether to exclude metadata
:param selector: a (lambda) function that takes a Utterance and returns True or False (i.e. include / exclude).
By default, the selector includes all Utterances that compose the object.
:return: a pandas DataFrame
"""
ds = dict()
for utt in obj.iter_utterances(selector):
d = utt.__dict__.copy()
if not exclude_meta:
for k, v in d['meta'].items():
d['meta.' + k] = v
del d['meta']
ds[utt.id] = d
df = pd.DataFrame(ds).T
df['id'] = df['_id']
df = df.set_index('id')
df = df.drop(['_id', '_owner', 'obj_type', 'user', '_root'], axis=1)
df['speaker'] = df['speaker'].map(lambda spkr: spkr.id)
meta_columns = [k for k in df.columns if k.startswith('meta.')]
return df[['timestamp', 'text', 'speaker', 'reply_to', 'conversation_id'] + meta_columns]
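# Usage sketch (assumes ``corpus`` is an object exposing iter_utterances, e.g. a ConvoKit-style Corpus):
# utt_df = get_utterances_dataframe(corpus, selector=lambda utt: len(utt.text) > 0)
# print(utt_df[['speaker', 'text']].head())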
def get_conversations_dataframe(obj, selector = lambda convo: True,
exclude_meta: bool = False):
"""
Get a DataFrame of the conversations of a given object with fields and metadata attributes,
with an optional selector that filters for conversations that should be included.
Edits to the DataFrame do not change the corpus in any way.
:param exclude_meta: whether to exclude metadata
:param selector: a (lambda) function that takes a Conversation and returns True or False (i.e. include / exclude).
By default, the selector includes all Conversations in the Corpus.
:return: a pandas DataFrame
"""
ds = dict()
for convo in obj.iter_conversations(selector):
d = convo.__dict__.copy()
if not exclude_meta:
for k, v in d['meta'].items():
d['meta.' + k] = v
del d['meta']
ds[convo.id] = d
df = pd.DataFrame(ds).T
df['id'] = df['_id']
df = df.set_index('id')
return df.drop(['_owner', 'obj_type', '_utterance_ids', '_speaker_ids', 'tree', '_id'], axis=1)
def get_speakers_dataframe(obj, selector = lambda utt: True, exclude_meta: bool = False):
"""
Get a DataFrame of the Speakers with fields and metadata attributes, with an optional selector that filters
Speakers that should be included. Edits to the DataFrame do not change the corpus in any way.
:param exclude_meta: whether to exclude metadata
    :param selector: a (lambda) function that takes a Speaker and returns True or False
(i.e. include / exclude). By default, the selector includes all Speakers in the Corpus.
:return: a pandas DataFrame
"""
ds = dict()
for spkr in obj.iter_speakers(selector):
d = spkr.__dict__.copy()
if not exclude_meta:
for k, v in d['meta'].items():
d['meta.' + k] = v
del d['meta']
ds[spkr.id] = d
    df = pd.DataFrame(ds)
#########################################################################################################
# @Author: --
# @Description: Retrieve Overpass data for specific key-values and create GeoJSON files
# @Usage: Create GeoJSON data for specific key value tags from OSM
#########################################################################################################
import json
import logging
import os.path
import time
import pandas as pd
from OSMPythonTools.overpass import overpassQueryBuilder
from RequestHandler import Configuration, RequestHandler
logging.basicConfig(level=logging.INFO)
BBOXES = {'UK': [49.9599, -7.572, 58.63500, 1.6815],
'IRL': [51.6692, -9.977084, 55.1317, -6.03299]}
OSM_TYPES1 = pd.read_csv('data/osm/osm_key_values.csv')
OSM_TYPES2 = pd.read_csv('data/osm/osm_key_values_additional.csv')
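# Minimal sketch of building one Overpass query per key/value row. The keyword arguments below
# follow the documented OSMPythonTools ``overpassQueryBuilder`` interface and are an assumption,
# since the query-building code itself is not part of this excerpt.
def _example_query(bbox, key, value):
    return overpassQueryBuilder(bbox=bbox,
                                elementType=['node', 'way'],
                                selector='"{}"="{}"'.format(key, value),
                                out='body',
                                includeGeometry=True)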
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 14:42:02 2018
@author: rwilson
"""
import pandas as pd
import numpy as np
import scipy.linalg as linalg
import random
import os
import h5py
import matplotlib.pyplot as plt
import itertools
from numba import njit
from numba import prange
import os
import shutil
import gc
class utilities():
''' Some helper functions
'''
def src_rec_pairs(channels, exclude=None, reciprocity=False, randSample=None):
'''Generate a list of source receiver pairs for all excluding a certain
channels.
Parameters
----------
channels : list
list of channels from which src rec pairs should be generated
exclude : list (Default = None)
list of channels which should be excluded from the list of channels
reciprocity : bool (Default = False)
Include reciprocal pairs.
randSample : int (Default = None)
Extract a random subset from the list of length ``randSample``
Returns
-------
src_rec : list
list of unique source receiver pairs
'''
        exclude = [] if exclude is None else exclude
        if reciprocity:
            src_rec = [(i, j) for i in channels for j in channels if i != j and
                       i not in exclude and
                       j not in exclude]
        elif not reciprocity:
            src_rec = [(i, j) for i in channels for j in channels if i != j and
                       i not in exclude and
                       j not in exclude and
                       i < j]
if randSample:
return random.sample(src_rec, randSample)
else:
return src_rec
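        # Example (hypothetical channel numbers): unique, non-reciprocal pairs for channels 1-4,
        # excluding channel 3:
        # utilities.src_rec_pairs([1, 2, 3, 4], exclude=[3])  ->  [(1, 2), (1, 4), (2, 4)]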
def read_channelPos(file, dimensions):
'''Read in csv containing each channel position. Currently expecting that
the channel position csv is of a specific type and needs shifting to bottom
zeroed coord. system.
Parameters
----------
file : str
Location of csv containing the channel locations
dimensions : dict
The ``height`` of the mesh.
Returns
-------
dfChan : DataFrame
Database of each channel location
'''
dfChan = pd.read_csv(file,
delim_whitespace=True, skiprows=2, usecols=[0,1,2,3,4])
dfChan.index = dfChan.index.droplevel()
dfChan.drop(inplace=True, columns=dfChan.columns[-2:].tolist())
dfChan.columns = ['x','y','z']
# Shift coords to mesh bottom zeroed
dfChan.z = dfChan.z + np.abs(dfChan.z.min()) + dimensions['height']/2 - np.abs(dfChan.z.min())
print('Channel Positions:\n', [(dfChan.iloc[i].x, dfChan.iloc[i].y, dfChan.iloc[i].z)
for i in range(dfChan.shape[0])])
print('Channel index:\n',[str(chan)
for _,chan in enumerate(dfChan.index.values)])
return dfChan
def HDF5_data_save(HDF5File, group, name, data, attrb={'attr': 0}, ReRw='w'):
'''Saves data into a hdf5 database, if data name already exists, then an
attempt to overwrite the data will be made
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
name : str
The name of the data to be saved within group
attrb : dict
attribute dictionary to store along with the database.
ReRw : str (Default = 'w')
The read/write format
'''
toscreen = '----- Attributes added to database %s %s, table %s ----- \n' \
%(HDF5File,group, name)
with h5py.File(HDF5File, ReRw) as f:
try:
dset = f.create_dataset(os.path.join(group, name), data=data, dtype='f')
print(toscreen)
for key,item in zip(attrb.keys(), attrb.values()):
print('Key:', key,'| item:', item)
dset.attrs[key] = item
except RuntimeError:
del f[os.path.join(group, name)]
dset = f.create_dataset(os.path.join(group, name), data=data, dtype='f')
print(toscreen)
for key,item in zip(attrb.keys(), attrb.values()):
print('Key:', key,'| item:', item)
dset.attrs[key] = item
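        # Usage sketch (file, group, dataset and attribute names here are placeholders):
        # utilities.HDF5_data_save('CWD_Inversion.h5', 'Inversion', 'example_data',
        #                          np.zeros((3, 3)), attrb={'units': 'mm'}, ReRw='a')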
def HDF5_data_del(HDF5File, group, names):
'''Deletes data from a hdf5 database within some group.
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
names : str
The names of the data groups to be deleted from ``group``
'''
with h5py.File(HDF5File, 'a') as f:
for name in names:
try:
path = os.path.join(group,name)
del f[path]
except KeyError:
print(name, "was not in", group)
def HDF5_data_read(HDF5File, group, name, ReRw='r'):
        '''Reads data from a hdf5 database
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
        name : str
            The name of the dataset to read within ``group``
        ReRw : str (Default = 'r')
The read/write format
Returns
-------
dset : ()
Data contained within group/name
'''
with h5py.File(HDF5File, ReRw) as f:
            dset = f[os.path.join(group, name)][()]  # the .value attribute was removed in h5py >= 3.0
return dset
def HDF5_attri_read(HDF5File, group, name, ReRw='r'):
'''Read keys and attributes from hdf5 database.
Parameters
----------
HDF5File : str
Relative location of database
group : str
The expected group name
        name : str
            The name of the dataset whose attributes are read within ``group``
        ReRw : str (Default = 'r')
The read/write format
Returns
-------
dic : dict
A dictionary of all the attributes stored within the group/name.
'''
with h5py.File(HDF5File, ReRw) as f:
return {item[0]:item[1] for item in f[os.path.join(group,name)].attrs.items()}
def WindowTcent(TS, wdws):
'''Determine the centre of each correlation window in time from the input
time-series database.
Parameters
----------
TS : float
Sampling period
wdws : list(str)
Containing the windows range in sample points separated by -
'''
wdws_cent = [int(np.mean([int(wdw.split('-')[0]),
int(wdw.split('-')[1]) ])) for wdw in wdws]
wdws_cent = np.array(wdws_cent) * TS
return wdws_cent
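        # Example: two 512-sample windows recorded with a 1 MHz sampling rate (TS = 1e-6 s)
        # utilities.WindowTcent(1e-6, ['0-512', '512-1024'])  ->  array([2.56e-04, 7.68e-04])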
def DiffRegress(Tseries, dfChan, Emaxt0, plotOut=False):
'''Perform linear regression to fit the 1D diffusion equation to an input
time series. The output of this function is an estimation of the
        diffusivity and dissipation (<NAME> et al. 2001).
Parameters
----------
Tseries : array-like
The input time series
dfChan : DataFrame
Containing the channel positsion columns x, y, z
Emaxt0 : int
The index corresponding to the arrival time (onset of) maximum energy
Returns
-------
popt[1] : float
            The diffusivity determined from the least-squares fit.
            Units depend upon the input t and z units.
popt[2] : float
The Dissipation
'''
from scipy import optimize
# Determine absolute distance between source and receiver
recPos = dfChan.loc[Tseries['recNo']]
srcPos = dfChan.loc[Tseries['srcNo']]
absDist = np.sqrt(abs(recPos.x - srcPos.x)**2 +
abs(recPos.y - srcPos.y)**2 +
abs(recPos.z - srcPos.z)**2)
# Define the 1D diffusion equation, logE(z,t)
def diffusivity(t, z, D, sigma):
return np.log(1/(2*np.sqrt(np.pi*D))) \
- 0.5*np.log(t) - z**2/(4*D*t) - sigma*t
# The energy density
y_data = np.log(Tseries['Tseries']**2)[Emaxt0:]
# The time axis zeroed to the onset of Emaxt0
x_data = (np.arange(0, Tseries['TracePoints']) *
Tseries['TSamp'])[Emaxt0-1:]
x_data = (x_data-x_data[0])[1:]
popt, pcov = optimize.curve_fit(diffusivity,
x_data,
y_data,
p0=[absDist, 1, 1],
bounds=([absDist*0.9, 0.1, 0.1],
[absDist*1.1, np.inf, np.inf]))
if plotOut:
# Plot the resulting fit
plt.figure(figsize=(6, 4))
plt.scatter(x_data, y_data, label='Data')
plt.plot(x_data, diffusivity(x_data, popt[0], popt[1], popt[2]),
label='Fitted function', color='red')
plt.legend(loc='best')
plt.show()
return popt[1], popt[2]
def src_recNo(CCdata):
        '''Extract the source receiver pairs within CCdata, excluding common pairs.
Parameters
----------
CCdata : dataframe
CCdata dataframe
Returns
-------
src_recNo : list
List of the source receiver numbers
'''
src_rec = list(sorted(set(
[(srcNo, recNo) for srcNo, recNo in
zip(CCdata.index.get_level_values('srcNo'),
CCdata.index.get_level_values('recNo')) if
srcNo != recNo]
)))
return src_rec
def traceAttributes(SurveyDB, Col):
'''Extract a single trace and its attributes from single survey dataframe,
into a dictionary.
Parameters
----------
        SurveyDB : DataFrame
Containing all traces for a single survey.
Col : int
The column to extract from the database.
Returns
-------
traceDict: dict
Containing the trace along with all header information.
'''
traceDict = {key: SurveyDB.columns.get_level_values(key)[Col] for key in
SurveyDB.columns.names}
traceDict['Tseries'] = SurveyDB.iloc[:, Col].values
return traceDict
def d_obs_time(CCdata, src_rec, lag, window, parameter='CC', staTime=None, stopTime=None):
'''Construct the d_obs dataframe over time from the input CCdata, for a
select list of source-receiver pairs.
Parameters
----------
CCdata : dataframe
CCdata dataframe
src_rec : list(tuples)
A list of tuples for each source receiver pair.
lag : list(tuples)
The lag value from which the extraction is made.
window : str/list(str)
string of windows or list of str of windows.
parameter : str (Default='CC')
Parameter from which to extract from the dataframe
staTime : str
The start time from which ``d_obs`` is extracted
stopTime : str
The stop time before which ``d_obs`` is extracted.
Returns
-------
d_obs_time : dataframe
dataframe containing the in each row the d_obs vector for all requested
source-receiver pairs, increasing with time.
'''
if staTime and stopTime:
mask = (pd.to_datetime(CCdata.index.get_level_values('Time')) >
pd.to_datetime(staTime)) & \
(pd.to_datetime(CCdata.index.get_level_values('Time')) <
pd.to_datetime(stopTime))
CCdata = CCdata.copy().loc[mask]
elif staTime:
mask = (pd.to_datetime(CCdata.index.get_level_values('Time')) >
pd.to_datetime(staTime))
CCdata = CCdata.copy().loc[mask]
elif stopTime:
mask = (pd.to_datetime(CCdata.index.get_level_values('Time')) <
pd.to_datetime(stopTime))
CCdata = CCdata.copy().loc[mask]
# Time index for each survey based on second src_rec pair.
time_index = np.array([0])
for sr in src_rec:
index = pd.to_datetime(CCdata.loc[([sr[0]], [sr[1]]),
(lag, window[0], parameter)].
unstack(level=[0, 1]).index)
if index.shape[0]>time_index.shape[0]:
time_index = index
if len(window)>1:
temp = []
for wdw in window:
df = pd.concat([CCdata.loc[([sr[0]], [sr[1]]),
(lag, wdw, parameter)].
unstack(level=[0, 1]).
reset_index(drop=True) for
sr in src_rec], axis=1)
temp.append(df)
d_obs_time = pd.concat(temp, axis=1)
else:
d_obs_time = pd.concat([CCdata.loc[([sr[0]], [sr[1]]),
(lag, window, parameter)].
unstack(level=[0, 1]).
reset_index(drop=True) for
sr in src_rec], axis=1)
d_obs_time.index = time_index
return d_obs_time.dropna().astype(float)
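        # Usage sketch (the lag and window labels below are placeholders):
        # d_obs = utilities.d_obs_time(CCdata, src_rec, lag='lag0', window=['300-812'],
        #                              staTime='2019-01-01', stopTime='2019-02-01')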
def measNorange(CCdata, staTime, endTime):
'''Determines the measurement survey number between given time interval.
This function is intended to allow the user to quickly determine the measurement
        number range of interest, thereby allowing the reprocessing of the raw data
over this region only. This requires that the user passes a CCdata which
represents the entire raw dataset.
Parameters
----------
CCdata : dataframe
CCdata dataframe
staTime : str
            The start time of the interval.
        endTime : str
            The end time of the interval.
Returns
-------
None : tuple
Measurement survey numbers within the range given.
'''
mask = (pd.to_datetime(CCdata.index.get_level_values('Time').values) > pd.to_datetime(staTime)) & \
(pd.to_datetime(CCdata.index.get_level_values('Time').values) < pd.to_datetime(endTime))
measNo = [i for i, x in enumerate(mask) if x]
return (measNo[0], measNo[-1])
def surveyNorange(TSsurveys, staTime, endTime):
'''Determines the survey number between given time interval.
This function is intended to allow the user to quickly determine the survey
number range of interest, thereby allowing the reporcessing of the raw data
over this region only. This requires that the user passes a CCdata which
represents the entire raw dataset.
Parameters
----------
TSsurveys : list
The survey folder numbers
staTime : str
            The start time of the interval.
        endTime : str
            The end time of the interval.
Returns
-------
None : tuple
Measurement survey numbers within the range given.
'''
surveyTimes = [pd.to_datetime(group.split('survey')[1]) for group in
TSsurveys]
mask = [(group > pd.to_datetime(staTime)) and
(group < pd.to_datetime(endTime)) for group in
surveyTimes]
TSsurveys = list(itertools.compress(TSsurveys, mask))
surveyNo = [i for i, x in enumerate(mask) if x]
return (surveyNo[0], surveyNo[-1])
def Data_on_mesh(mesh_obj, data, loc=''):
'''Place data onto the mesh, and store each mesh file in subfolder.
Parameters
----------
mesh_obj : mesh.object
mesh object as generated by the class mesh, containing the mesh as well
as the functions for setting and saving that mesh.
data : array
            Array of mesh cell values stored in rows, with one time step per column.
            A separate mesh file will be generated for each column.
loc : str (Default = '')
Location to which the output vtu files will be saved.
'''
if loc=='':
loc = 'inv_on_mesh/'
exists = os.path.isdir(loc)
if exists:
shutil.rmtree(loc)
os.makedirs(loc)
else:
os.makedirs(loc)
# Extract the Kt values for a single src rev pair and save to mesh
for idx, tstep in enumerate(data.T):
# Calculate the decorrelation values
mesh_obj.setCellsVal(tstep)
# Save mesh to file
mesh_obj.saveMesh(os.path.join(loc,'mesh%s' % idx))
# self.mesh_obj.saveMesh(os.path.join(loc,'Kernel%s_%s_No%s' % (srcrec[0], srcrec[1],idx)))
def inversionSetup(mesh_param, channelPos, noise_chan, drop_ch,noiseCutOff,database,
CCdataTBL, Emaxt0, TS_idx, inversion_param, max_src_rec_dist = None,
verbose=False):
        '''Run function intended to cluster the steps required to set up the
        inversion mesh and the associated sensitivity kernels. If the mesh_param
        dictionary key "makeMSH" is true, a new mesh will be constructed,
        otherwise an attempt to load it from disk will be made.
Parameters
----------
mesh_param : dict
The height, radius, char_len, makeMSH (bool) of the mesh
channelPos : str
Location of the csv contraining the channel positions, same units as
provided in the ``mesh_param``
noise_chan : list/None
Channels which are pure noise, and will be used to determine the SNR.
if ``None`` is given no attempt to calculate the SNR will be made.
drop_ch : list
Channels which should be dropped. Pass ``None`` or ``False`` to skip.
noiseCutOff : float (Default=10.91)
The noise cutoff in dB
database : str
The location of the processed data ``database.h5``.
CCdataTBL : str
The name of the database table eg:``CCprocessedFixed``
Emaxt0 : int
The arrival time in number of sample points at which max. s-wave energy
            arrives. Used to perform the regression to determine the diffusivity.
TS_idx : int
The index of the survey which is to be used for SNR calculation.
inversion_param : dict
sigma "The scattering cross-section perturbation size in [area]",
c "velocity of the core". If sigma is provided than the value
determined from diffusion regression will be overwritten.
lag "The lag value (fixed or rolling) intended to feed into ``d_obs``
calcKernels "If true calculate the sens. kernels, otherweise skip and database won't be
overwritten."
wdws_sta "remove the first n windows from the inversion d_obs data, default = 0"
max_src_rec_dist : float (default = None)
Max source/receiver separation distance. Any pairs greater than this will be removed
from the CCdata.
verbose : Bool (default = False)
True for the most verbose output to screen.
Returns
-------
CWD_Inversion.h5 : hdf5 database
            The output G matrix holding all sensitivity kernels for each src/rec
            pair is written to the database for use in the inversion.
setupDict : dict
Containing modified or produced data for the inversion.
'''
import postProcess as pp
import mesh as msh
import data as dt
# ------------------- Apply Defaults -------------------#
default_dict = dict(sigma = None, wdws_sta = 0, wdws_end = None)
for k, v in default_dict.items():
try:
inversion_param[k]
except KeyError:
inversion_param[k] = v
# ------------------- Mesh the Inversion Space -------------------#
if mesh_param['makeMSH']:
clyMesh = msh.mesher(mesh_param) # Declare the class
clyMesh.meshIt() # Create the mesh
clyMesh.meshOjtoDisk() # Save mesh to disk
else:
clyMesh = msh.mesher(mesh_param) # Declare the class
            clyMesh = clyMesh.meshOjfromDisk() # Load from disk
# ------------------- Calculate the Kij -------------------#
# Read in channel datafile
dfChan = utilities.read_channelPos(channelPos, mesh_param)
# Load in single survey details
TSsurveys = dt.utilities.DB_group_names(database, 'TSdata')
TSsurvey = dt.utilities.\
DB_pd_data_load(database,
os.path.join('TSdata',
TSsurveys[TS_idx]))
# Read in the src/rec pairs
CCdata = dt.utilities.DB_pd_data_load(database, CCdataTBL)
# Calculate the window centre of each window in time
TS = TSsurvey.columns.get_level_values('TSamp').unique().tolist()[0]
wdws = CCdata.columns.get_level_values('window').unique()\
.tolist()[inversion_param['wdws_sta']:inversion_param['wdws_end']]
wdws_cent = utilities.WindowTcent(TS, wdws)
# Remove noisy channels
if noise_chan:
noiseyChannels, SNR, NoiseTraces = pp.\
post_utilities.\
calcSNR(TSsurvey,
noise_chan,
dfChan.index.values,
wdws,
noiseCutOff, inspect=verbose)
CCdata = pp.post_utilities.CC_ch_drop(CCdata, noiseyChannels, errors='ignore')
else:
noiseyChannels = None
src_rec = utilities.src_recNo(CCdata)
if max_src_rec_dist:
# List of src and rec
src = [src[0] for src in src_rec]
rec = [rec[1] for rec in src_rec]
# Create df with the src rec positions used (i.e. found in self.src_rec)
srR_df = pd.DataFrame({'src': src, 'rec': rec})
srR_df = pd.concat([srR_df, dfChan.loc[srR_df['src']]. \
rename(columns = {'x':'sx', 'y':'sy', 'z':'sz'}). \
reset_index(drop=True)], axis=1)
srR_df = pd.concat([srR_df, dfChan.loc[srR_df['rec']]. \
rename(columns = {'x':'rx', 'y':'ry', 'z':'rz'}). \
reset_index(drop=True)], axis=1)
# Assign the R value
R_vals =np.linalg.norm(dfChan.loc[src].values -
dfChan.loc[rec].values, axis=1)
srR_df['R'] = R_vals
ch_far = srR_df.loc[srR_df['R'] > max_src_rec_dist][['src', 'rec']].values.tolist()
CCdata = pp.post_utilities.CC_ch_drop(CCdata, ch_far, errors='ignore')
src_rec = utilities.src_recNo(CCdata)
else:
srR_df = None
if drop_ch:
pp.post_utilities.CC_ch_drop(CCdata, drop_ch, errors='ignore')
src_rec = utilities.src_recNo(CCdata)
# The linear regression for D and sigma using a trace selected mid-way
trace = utilities.traceAttributes(TSsurvey, TSsurvey.shape[1]//2)
if verbose:
plt.figure(figsize=(7, 2))
plt.plot(np.log(trace['Tseries']**2))
D, sigma_temp = utilities.DiffRegress(trace, dfChan,
Emaxt0, plotOut=verbose)
# ------------------- Deal with some param types -------------------#
if inversion_param['sigma'] is None:
inversion_param['sigma'] = sigma_temp
if inversion_param['wdws_end'] is None:
inversion_param['wdws_end'] = 'None'
# Unity medium parameters
inversion_param['D'] = D
print('\n-------- Applied Kernel Parameters --------\n',
'D = %g : sigma = %g, : c = %g \n' %(D,
inversion_param['sigma'],
inversion_param['c']))
if 'calcKernels' in inversion_param.keys() and inversion_param['calcKernels']:
            # decorrelation coefficient for each tet centre, each src/rec
Kth = decorrTheory(src_rec, dfChan, clyMesh.cell_cent,
wdws_cent, inversion_param, clyMesh)
# Generate the required kernel matrix for inversion
Kth.Kt_run()
# Place the kernels on the mesh
Kth.K_on_mesh()
# ------------------- determine d_obs -------------------#
d_obs = utilities.d_obs_time(CCdata, src_rec, inversion_param['lag'],
wdws, staTime=None,
stopTime=None)
utilities.HDF5_data_save('CWD_Inversion.h5', 'Inversion', 'd_obs',
d_obs.values, {'wdws': [x.encode('utf-8') for x in wdws],
'lag': inversion_param['lag'],
'Times': [a.encode('utf8') for a in d_obs.index.strftime("%d-%b-%Y %H:%M:%S.%f").values.astype(np.str)]},
ReRw='r+')
# ------------------- Calculate initial tuning param -------------------#
        print('\n-------- Initial tuning parameters --------')
inversion_param['lambda_0'] = inversion_param['c'] / inversion_param['f_0']
if 'L_0' not in inversion_param.keys():
inversion_param['L_0'] = 8 * inversion_param['lambda_0'] # Planes 2015
print('lambda_0 = %g' % inversion_param['lambda_0'] )
print('L_0 = %g' % inversion_param['L_0'])
print('L_c = %g' % inversion_param['L_c'])
try:
print('The user defined sigma_m = %g will be applied'
% inversion_param['sigma_m'])
except KeyError:
inversion_param['sigma_m'] = 1e-4 * \
inversion_param['lambda_0']/10**2 # Planes 2015
print('Calculated sigma_m = %g will be applied'
% inversion_param['sigma_m'])
# Store various info in dict
return {'CCdata' :CCdata, 'src_rec' : src_rec, 'dfChan' : dfChan,
'd_obs' : d_obs, 'wdws': wdws, 'cell_cents': clyMesh.cell_cent,
'noiseyChannels': noiseyChannels, 'srR_df': srR_df,
**inversion_param}
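    # Usage sketch (all values below are placeholders, not taken from an actual experiment):
    # mesh_param = dict(height=85.0, radius=19.5, char_len=4.0, makeMSH=True)
    # inversion_param = dict(c=3500.0, f_0=500e3, L_c=10.0, lag='lag0', calcKernels=True)
    # setup = utilities.inversionSetup(mesh_param, 'channelPos.csv', noise_chan=None, drop_ch=None,
    #                                  noiseCutOff=10.91, database='database.h5',
    #                                  CCdataTBL='CCprocessedFixed', Emaxt0=400, TS_idx=0,
    #                                  inversion_param=inversion_param)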
def inv_on_mesh(hdf5File):
        '''Read in all the ``m_tilde`` groups within the provided hdf5 database file and place them
        onto the vtu mesh. If multiple groups are found then each will be saved into a different
        folder. Each folder will contain the inversion results for each time step.
        Parameters
        ----------
        hdf5File : str
            The name and relative location of the database containing all inversion results.
'''
import postProcess as pp
import mesh as msh
import data as dt
# Load the mesh
# clyMesh = msh.mesher(mesh_param) # Declare the class
clyMesh = msh.utilities.meshOjfromDisk() # Read the mesh from disk
# Load in the model param
groups = dt.utilities.DB_group_names(hdf5File, 'Inversion')
m_tildes_groups = [s for s in groups if "m_tildes" in s]
for folder in m_tildes_groups:
# Read in the inversion
m_tilde = utilities.HDF5_data_read(hdf5File, 'Inversion', folder)
# Place onto mesh
utilities.Data_on_mesh(clyMesh, m_tilde, 'inv_'+folder+'/' )
    def l1_residual(m_tildes, d_obs, G):
        '''Calculate the l1 norm and residual error for each ``m_tilde`` column (time step),
        e.g. for constructing an L-curve when scanning regularisation parameters.
        Parameters
        ----------
        m_tildes : array
            Model estimates, one column per time step.
        d_obs : array
            Observed data, one row per time step (as stored in ``Inversion/d_obs``).
        G : array
            The sensitivity kernel matrix used in the inversion.
        Returns
        -------
        l1, ErrorR : tuple(array, array)
            The l1 norm and residual error for each time step.
        '''
        l1 = np.zeros(m_tildes.shape[1])
        ErrorR = np.zeros(m_tildes.shape[1])
        for idx, m_tilde in enumerate(m_tildes.T):
            # Calculate the l1 norm
            l1[idx] = np.sum(np.abs(m_tilde))
            # Calculate the residual error
            ErrorR[idx] = np.sqrt(np.sum((d_obs[idx] - G.dot(m_tilde))**2))
        return l1, ErrorR
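    # L-curve sketch using the values returned above (purely illustrative):
    # l1, ErrorR = utilities.l1_residual(m_tildes, d_obs, G)
    # plt.loglog(ErrorR, l1, 'o-')
    # plt.xlabel('residual norm ||d_obs - G m||_2')
    # plt.ylabel('solution norm ||m||_1')
    # plt.show()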
def inv_plot_tsteps(hdf5File, zslice_pos=1, subplot=True, plotcore=False, PV_plot_dict=None,
):
'''Creates plot of each time-step from different inversion results, for all inversion
folders found within root directory.
Parameters
----------
hdf5File : str
The hdf5 file from which each inversion result will be read
mesh_param : str
The hdf5 file from which each inversion result will be read
sliceOrthog : bool (default=True)
Perform orthogonal slicing of the mesh.
subplot : bool (default=True)
Place all time-steps on single subplot, else each time step will be placed on an
individual figure.
plotcore : bool (default=False)
Plot the core surface
PV_plot_dict : dict (default=None)
Dictionary containing dict(database='loc_database', y_data='name_ydata'). If given
a small plot will of the PVdata over time, with the current time annotated.
Notes
-----
'''
import pyvista as pv
import numpy as np
import matplotlib.pyplot as plt
import glob
from os.path import join
import mesh as msh
import data as dt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import data as dt
if PV_plot_dict:
PVdata = dt.utilities.DB_pd_data_load(PV_plot_dict['database'], 'PVdata')
PVdata.index = pd.to_datetime(PVdata['Time Stamp'])
os.makedirs('figs',exist_ok=True)
# Load mesh object
clyMesh = msh.utilities.meshOjfromDisk()
# Load in the model param
groups = dt.utilities.DB_group_names(hdf5File, 'Inversion')
invFolders = [s for s in groups if "m_tildes" in s]
for invfolder in invFolders:
# invinFolder = glob.glob(invfolder+"*.vtu")
# meshes = [pv.read(inv) for inv in invinFolder]
m_tildes = utilities.HDF5_data_read(hdf5File, 'Inversion', invfolder)
# Load in the attributes,
attri = utilities.HDF5_attri_read(hdf5File, 'Inversion', invfolder)
# TODO:
# Plot the standard attribute common to all , and then update the time for each
attri['Times'] = [time.decode('UTF-8') for time in attri['Times']]
plot_rows = m_tildes.shape[1]
# Create mesh for each time-step
meshes = []
for time, m_tilde in enumerate(m_tildes.T):
# The apply data to the mesh
clyMesh.setCellsVal(m_tilde)
# clyMesh.cell_data['tetra']['gmsh:physical'] = m_tilde[1]
clyMesh.saveMesh('baseMesh')
# Load in base mesh
meshes.append(pv.read('baseMesh.vtu'))
# meshes['mesh'+str(time)] = baseMesh
# meshes['slice'+str(time)] = baseMesh.slice_orthogonal(x=None, y=None, z=zslice_pos)
#
all_dataRange = (min([mesh.get_data_range()[0] for mesh in meshes]),
max([mesh.get_data_range()[1] for mesh in meshes]))
# Slice each mesh
slices = [mesh.slice_orthogonal(x=None, y=None, z=zslice_pos) for mesh in meshes]
col = 2 if plotcore else 1
if subplot:
p = pv.Plotter(shape=(plot_rows, col), border=False, off_screen=True)
p.add_text(invfolder)
for time, _ in enumerate(m_tildes.T):
print('Time:',time)
p.subplot(0, time)
p.add_mesh(slices[time], cmap='hot', lighting=True,
stitle='Time %g' % time)
p.view_isometric()
if plotcore:
p.subplot(1, time)
p.add_mesh(slices[time], cmap='hot', lighting=True,
stitle='Time %g' % time)
p.view_isometric()
p.screenshot(invfolder.split('/')[0]+".png")
else:
for time, _ in enumerate(m_tildes.T):
p = pv.Plotter(border=False, off_screen=True)
p.add_text('Time: %s\n L_c: %g\n sigma_m: %g\n rms_max: %g' \
% (attri['Times'][time],
attri['L_c'],
attri['sigma_m'],
attri['rms_max']),
font_size=12)
p.add_text(str(time), position=(10,10))
p.add_mesh(slices[time], cmap='hot', lighting=True,
stitle='Time %g' % time,
clim=all_dataRange,
scalar_bar_args=dict(vertical=True))
file_name = invfolder.split('/')[0]+'_'+str(time)+".png"
p.screenshot(file_name)
if PV_plot_dict:
idx_near = PVdata.index[PVdata.index.get_loc(attri['Times'][time],
method='nearest')]
y_time = PVdata[PV_plot_dict['y_data']].loc[idx_near]
img = plt.imread(file_name)
fig, ax = plt.subplots()
x = PVdata['Time Stamp']
y = PVdata[PV_plot_dict['y_data']]
ax.imshow(img, interpolation='none')
ax.axis('off')
axins = inset_axes(ax, width="20%", height="20%", loc=3, borderpad=2)
axins.plot(x, y, '--', linewidth=0.5, color='white')
axins.grid('off')
# Set some style
axins.patch.set_alpha(0.5)
axins.tick_params(labelleft=False, labelbottom=False)
axins.set_xlabel("time", fontsize=8)
axins.xaxis.label.set_color('white')
axins.tick_params(axis='x', colors='white')
axins.yaxis.label.set_color('white')
axins.tick_params(axis='y', colors='white')
# Vertical line point
axins.scatter(x=idx_near, y=y_time, color='white')
fig.savefig(os.path.join('figs',file_name),
bbox_inches = 'tight',
pad_inches = 0,
dpi = 'figure')
plt.close()
def PV_INV_plot_final(hdf5File, fracture=None, trim=True, cbarLim=None,
t_steps=None, SaveImages=True, down_sample_figs=False, PV_plot_dict=None,
plane_vectors=([0.5, 0.5, 0], [0, 0.5, 0.5], [0, 0, 0]),
):
'''Intended for the creation of final images for publications. Includes the output of white
        space trimmed png's and a csv database of perturbation data corresponding to each
time-step. Spatial autocorrelation analysis available through ``statBall``.
Parameters
----------
hdf5File : str
The hdf5 file from which each inversion result will be read
fracture : list(object) (default=None)
Aligned pyvista mesh object to be included into each time-step plot.
trim : bool (default=True)
Trim white space from each png produced.
cbarLim : tuple (default=None)
Fix the global colour bar limits (min, max),
t_steps : list (default=None)
Select timesteps to display. Produces a filtered PV_INV database of a selection of tsteps
SaveImages: bool/int (default=True)
Produce all time-step images found in hdf5file.
down_sample_figs: bool (default=False)
Skip every ``down_sample_figs`` figure to save to disk
PV_plot_dict : dict (default=None)
Dictionary containing dict(database='loc_database', PVcol='name_ydata') defining the
location of the raw input database and the PV data col.
Additional inputs are:
dict(axis_slice='y', frac_slice_origin=(0,0.1,0),
pointa=None, pointb=None, # The start and end point of a sampling line
ball_rad=None, ball_pos=None # list of radius and position of a sampling sphere
ball_shift_at=None # ([0,3,0], 27) shift ball at time step 27
area = 27 # the area from which stress will be calculated,
len_dia = (85, 39)( # initial length and diameter of the sample
LVDT_a = "name of axial LVDTs", can be a list in which case the average is calculated
LVDT_r = "name of radial LVDTs", can be a list in which case the average is calculated
            frac_onset_hours = 103 # The hours at which fracturing onset occurs, used to calculate
            the percentage of peak stress at which fracturing occurs.
            statBallrad = Radius of the sphere within which the Getis and Ord local G test is calculated
            statBallpos = Position of the sphere within which the Getis and Ord local G test is calculated
distThreshold = threshold used when calculating the distance weights
)
plane_vectors : tuple(list(v1,v2,origin), list()) (default=([0.5,0.5,0],[0,0.5,0.5]), [0, 0, 0])
two non-parallel vectors defining plane through mesh. ([x,y,z], [x,y,z]). If a list of
tuples is provided multiple slices of the domain will be made and saved to file.
Notes
-----
'''
import pyvista as pv
import matplotlib.pyplot as plt
import mesh as msh
import CWD as cwd
import data as dt
from string import ascii_uppercase
# ------------------- Internal functions -------------------#
def staggered_list(l1, l2):
l3 = [None]*(len(l1)+len(l2))
l3[::2] = l1
l3[1::2] = l2
return l3
def viewcam(handel, axis_slice):
if axis_slice =='y':
return handel.view_xz(negative=True)
elif axis_slice =='x':
return handel.view_yz(negative=True)
elif axis_slice =='z':
return handel.view_xy(negative=False)
def core_dict(axis_slice):
if axis_slice == 'y':
return dict(show_xaxis=True, show_yaxis=False, show_zaxis=True, zlabel='Z [mm]',
xlabel='X [mm]', ylabel='Y [mm]', color='k')
if axis_slice == 'x':
return dict(show_xaxis=False, show_yaxis=True, show_zaxis=True, zlabel='Z [mm]',
xlabel='X [mm]', ylabel='Y [mm]', color='k')
if axis_slice == 'z':
return dict(show_xaxis=True, show_yaxis=True, show_zaxis=False, zlabel='Z [mm]',
xlabel='X [mm]', ylabel='Y [mm]', color='k')
# ------------------- Apply Defaults -------------------#
default_dict = dict(sigma = None, axis_slice='y', frac_slice_origin=(0,0.1,0),
pointa=None, pointb=None, ball_rad = None, ball_pos=None,
ball_shift_at=None, area = None, len_dia = None, LVDT_a = None,
LVDT_r = None,
frac_onset_hours=0,
anno_lines= None,
channels=None,
statBallrad=None,
statBallpos=None,
distThreshold=5)
for k, v in default_dict.items():
try:
PV_plot_dict[k]
except KeyError:
PV_plot_dict[k] = v
if not isinstance(fracture, list):
fracture = [fracture]
# Load in the model param
groups = dt.utilities.DB_group_names(hdf5File, 'Inversion')
invfolder = [s for s in groups if "m_tildes" in s][0]
# Extract out the m tildes
m_tildes = cwd.utilities.HDF5_data_read(hdf5File, 'Inversion', invfolder)
# Load in the attributes,
attri = cwd.utilities.HDF5_attri_read(hdf5File, 'Inversion', invfolder)
attri['Times'] = [time.decode('UTF-8') for time in attri['Times']]
# Make output folder
folder = 'final_figures'
os.makedirs(folder, exist_ok=True)
# Place each m_tilde onto the mesh
inversions, all_dataRange= cwd.utilities.genRefMesh(m_tildes,
pathRef='cly.Mesh',
pathGen='baseMesh')
# # Load mesh
# clyMesh = msh.utilities.meshOjfromDisk()
# # Save the base mesh
# for idx,col in enumerate(m_tildes.T):
# if col[0]>0:
# break
# clyMesh.setCellsVal(m_tildes.T[idx])
# clyMesh.saveMesh('baseMesh')
# # Read in base mesh
# inversions = pv.read('baseMesh.vtu')
# # Deal with padded zeros
# pad = np.argwhere(inversions.cell_arrays['gmsh:physical']>0)[0][0]
# all_dataRange = [0,0]
# # Add each time-step to the mesh and determine the data range
# for time, m_tilde in enumerate(m_tildes.T):
# inversions.cell_arrays['time%g' % time] = np.pad(m_tilde, (pad, 0), 'constant')
# if inversions.cell_arrays['time%g' % time].min() < all_dataRange[0]:
# all_dataRange[0] = inversions.cell_arrays['time%g' % time].min()
# if inversions.cell_arrays['time%g' % time].max() > all_dataRange[1]:
# all_dataRange[1] = inversions.cell_arrays['time%g' % time].max()
if cbarLim:
all_dataRange=cbarLim
# Calculate slice through mesh
if isinstance(plane_vectors, tuple):
v = np.cross(plane_vectors[0], plane_vectors[1])
v_hat = v / (v**2).sum()**0.5
elif isinstance(plane_vectors, list):
v_hat = []
for planes in plane_vectors:
v = np.cross(planes[0], planes[1])
v_hat.append(v / (v**2).sum()**0.5)
#----------------- Plot data to disk -----------------
slice_ax = inversions.slice(normal=PV_plot_dict['axis_slice'])
frac_y = [frac.slice(normal=PV_plot_dict['axis_slice'],
origin=PV_plot_dict['frac_slice_origin']) for frac in fracture]
# frac_y = fracture.slice(normal=PV_plot_dict['axis_slice'],
# origin=PV_plot_dict['frac_slice_origin'])
if isinstance(v_hat, list):
frac_slices = []
for idx, v in enumerate(v_hat):
frac_slices.append( inversions.slice(normal=v, origin=(plane_vectors[idx][2])) )
else:
frac_slices = [inversions.slice(normal=v_hat, origin=(plane_vectors[2]))]
# ----------------- Create Sample over line -----------------
if PV_plot_dict['pointa'] and PV_plot_dict['pointb']:
# Sample over line within fracture
frac_line = pv.Line(pointa=PV_plot_dict['pointa'],
pointb=PV_plot_dict['pointb'])
frac_line_vals = frac_line.sample(inversions)
frac_line_DF = pd.DataFrame(index=range(frac_line_vals.n_points))
else:
frac_line_DF = None
# ----------------- Create Sample at sphere -----------------
if PV_plot_dict['ball_rad'] and PV_plot_dict['ball_pos']:
# Sample volume within sphere
ball_dict = {}
for idx, (rad, pos) in enumerate(zip(PV_plot_dict['ball_rad'], PV_plot_dict['ball_pos'])):
ball_dict['ball_%g' %(idx+1)] = pv.Sphere(radius=rad, center=pos)
ball_dict['ball_%g_vals' %(idx+1)] = ball_dict['ball_%g' %(idx+1)].sample(inversions)
ball_dict['ball_%g_DF' %(idx+1)] = pd.DataFrame(
index=range(ball_dict['ball_%g' %(idx+1)].n_points))
else:
ball_dict = None
# ----------- Create spatial statistics within sphere -----------
if PV_plot_dict['statBallrad'] and PV_plot_dict['statBallpos']:
import pysal
from esda import G_Local
# from pysal.explore.esda.getisord import G_Local
statBall_dict = {}
ball = pv.Sphere(radius=PV_plot_dict['statBallrad'],
center=PV_plot_dict['statBallpos'],
theta_resolution=20,
phi_resolution=20)
clipped = inversions.clip_surface(ball, invert=True)
# Extract from clipped
cellTOpoint = clipped.cell_data_to_point_data()
statBall_dict['staball_coords'] = cellTOpoint.points
# Now calc the spatial statistics
statBall_dict['staball_w'] = pysal.lib.weights.DistanceBand(statBall_dict['staball_coords'],
threshold=PV_plot_dict['distThreshold'])
statBall_dict['staball_p_val'] = []
# ----------------- Perform save for each t-step -----------------
times = [time for time, _ in enumerate(m_tildes.T)]
count = 0
if down_sample_figs:
print('* Down Sample figures')
times_down = times[0::down_sample_figs]
times_down = times_down + t_steps
times_down = list(set(times_down))
times_down.sort()
else:
times_down = times
for time in times:
# Extract samples from domain
if isinstance(frac_line_DF, pd.DataFrame):
frac_line_DF['time%g' % time] = frac_line_vals['time%g' % time]
if ball_dict:
if PV_plot_dict['ball_shift_at']:
for shifts in PV_plot_dict['ball_shift_at']:
if time == shifts[1]:
for no, shift in enumerate(shifts[0]):
ball_dict['ball_%g' %(no+1)].translate(shift)
ball_dict['ball_%g_vals' %(no+1)] = ball_dict['ball_%g_vals' %(no+1)].sample(inversions)
for ball, _ in enumerate(PV_plot_dict['ball_rad']):
ball_dict['ball_%g_DF' %(ball+1)]['time%g' % time] = ball_dict['ball_%g_vals' %(ball+1)]['time%g' % time]
# ball_DF['time%g' % time] = ball_vals['time%g' % time]
if statBall_dict:
np.random.seed(10)
lg = G_Local(cellTOpoint['time%g' % time],
statBall_dict['staball_w'],
transform='B')
statBall_dict['staball_p_val'].append(lg.p_sim[0])
# statBall_dict['staball_DF'][0, 'time%g' % time] = lg.p_sim[0]
if SaveImages and time==times_down[count]:
count+=1
if count == len(times_down):
count-=1
for idx, view in enumerate([slice_ax]+frac_slices):
p = pv.Plotter(border=False, off_screen=False)
p.add_mesh(view, cmap='hot', lighting=True,
stitle='Time %g' % time,
scalars=view.cell_arrays['time%g' % time],
clim=all_dataRange,
scalar_bar_args=dict(vertical=True))
for fracy in frac_y:
p.add_mesh(fracy)
if idx == 0:
p.view_xz(negative=True)
elif idx >= 1:
if isinstance(v_hat, list):
p.view_vector(v_hat[idx-1])
else:
p.view_vector(v_hat)
p.remove_scalar_bar()
p.set_background("white")
file_name = os.path.join(folder, invfolder)+'_view%g_%g.png' % (idx, time)
p.screenshot(file_name)
# ----------------- Make colorbar -----------------
p = pv.Plotter(border=False, off_screen=False)
p.add_mesh(slice_ax, cmap='hot', lighting=True,
stitle='Time %g' % time,
scalars=slice_ax.cell_arrays['time0'],
clim=all_dataRange,
scalar_bar_args=dict(vertical=False,
position_x=0.2,
position_y=0,
color='black'))
p.view_xz(negative=True)
p.screenshot(os.path.join(folder, 'cbar.png'))
# ----------------- core example -----------------
clipped = inversions.clip(PV_plot_dict['axis_slice'])
if isinstance(plane_vectors, tuple):
plane_draw = list(plane_vectors)
elif isinstance(plane_vectors, list):
plane_draw = plane_vectors
a_s = []
b_s = []
lines = []
if PV_plot_dict['anno_lines']:
for plane in PV_plot_dict['anno_lines']:
a = plane[0]
b = plane[1]
a_s.append(a)
b_s.append(b)
lines.append(pv.Line(a, b))
p = pv.Plotter(border=False, off_screen=False,window_size=[1024*4, 768*4])
# p.add_mesh(inversions, opacity=0.1, edge_color ='k')
p.add_mesh(clipped, opacity=0.3, edge_color ='k',lighting=True,
scalars=np.ones(clipped.n_cells))
for frac in fracture:
p.add_mesh(frac,lighting=True)
# p.add_mesh(fracture, lighting=True)
for idx, (line, a, b) in enumerate(zip(lines, a_s, b_s)):
p.add_mesh(line, color="white", line_width=5)
flat_list = [item for sublist in PV_plot_dict['anno_lines'] for item in sublist]
labels = list(ascii_uppercase[:len(flat_list)])
p.add_point_labels(
flat_list, labels, font_size=120, point_color="red", text_color="k")
if isinstance(PV_plot_dict['channels'], pd.DataFrame):
for index, row in PV_plot_dict['channels'].iterrows():
ball = pv.Sphere(radius=1, center=row.tolist())
p.add_mesh(ball, 'r')
# poly = pv.PolyData(PV_plot_dict['channels'].values)
# p.add_points(poly, point_size=20)
p.set_background("white")
viewcam(p, PV_plot_dict['axis_slice'])
# p.show_bounds(font_size=20,
# location='outer',
# padding=0.005,**core_dict(PV_plot_dict['axis_slice']))
p.remove_scalar_bar()
file_name = os.path.join(folder, invfolder)+'_coreview.png'
p.screenshot(file_name)
# ----------------- trim whitespace -----------------
if trim:
utilities.whiteSpaceTrim(folderName=folder, file_match=invfolder+'*.png')
# ----------------- Save requested data to disk -----------------
PVdata = dt.utilities.DB_pd_data_load(PV_plot_dict['database'], 'PVdata')
PVdata['refIndex'] = PVdata.index
PVdata['hours'] = pd.to_timedelta(PVdata['Time Stamp']-PVdata['Time Stamp'][0]). \
dt.total_seconds()/3600
PVdata.index = pd.to_datetime(PVdata['Time Stamp'])
if PV_plot_dict['area']:
PVdata['Stress (MPa)'] = PVdata[PV_plot_dict['PVcol']]/PV_plot_dict['area']*1000
PeakStress = PVdata['Stress (MPa)'].max()
fracStress = PVdata.query('hours>=%g' % PV_plot_dict['frac_onset_hours'])['Stress (MPa)'].iloc[0]
percOfPeak = fracStress/PeakStress
if PV_plot_dict['len_dia'] and PV_plot_dict['LVDT_a']:
PVdata['strainAx'] = PVdata[PV_plot_dict['LVDT_a']].mean(axis=1)/PV_plot_dict['len_dia'][0]
if PV_plot_dict['LVDT_r']:
PVdata['strainVol'] = PVdata['strainAx'] - PVdata[PV_plot_dict['LVDT_r']]/PV_plot_dict['len_dia'][1]*2
near = [PVdata.index[PVdata.index.get_loc(idx, method='nearest')] for
idx in attri['Times']]
idx_near = [timestampe if isinstance(timestampe, pd.Timestamp) else near[idx-1] for
idx, timestampe in enumerate(near)]
if isinstance(frac_line_DF, pd.DataFrame):
frac_line_DF = frac_line_DF.T
frac_line_DF['ave'] = frac_line_DF.mean(axis=1).values
frac_line_DF['hours'] = PVdata['hours'].loc[idx_near].values
frac_line_DF.to_csv(os.path.join(folder, 'fracline.csv'))
if ball_dict:
for DF_no, _ in enumerate(PV_plot_dict['ball_rad']):
ball_dict['ball_%g_DF' %(DF_no+1)] = ball_dict['ball_%g_DF' %(DF_no+1)].T
ball_dict['ball_%g_DF' %(DF_no+1)]['ave'] = ball_dict['ball_%g_DF' %(DF_no+1)].\
mean(axis=1).values
ball_dict['ball_%g_DF' %(DF_no+1)]['hours'] = PVdata['hours'].loc[idx_near].values
ball_dict['ball_%g_DF' %(DF_no+1)].to_csv(os.path.join(folder, 'ball%g.csv' % DF_no))
if statBall_dict:
statBall_dict['staball_DF'] = pd.DataFrame(statBall_dict['staball_p_val'], columns=['p_val'])
statBall_dict['staball_DF']['hours'] = PVdata['hours'].loc[idx_near].values
statBall_dict['staball_DF'].to_csv(os.path.join(folder, 'statBall.csv'))
if PV_plot_dict['area'] and PV_plot_dict['len_dia'] and PV_plot_dict['LVDT_a'] and PV_plot_dict['LVDT_r']:
y_time = PVdata[[PV_plot_dict['PVcol'],'Stress (MPa)', 'refIndex','hours', 'strainAx', 'strainVol']].loc[idx_near].reset_index()
elif PV_plot_dict['area']:
y_time = PVdata[[PV_plot_dict['PVcol'], 'Stress (MPa)','refIndex','hours']].loc[idx_near].reset_index()
else:
y_time = PVdata[[PV_plot_dict['PVcol'],'refIndex','hours']].loc[idx_near].reset_index()
y_time.to_csv(os.path.join(folder, 'PV_INV.csv'))
if t_steps:
y_time = y_time.loc[t_steps].reset_index()
y_time.index += 1
y_time.to_csv(os.path.join(folder, 'PV_INV_select.csv'), index_label='t_index')
view1 = [os.path.join(folder, invfolder)+'_view%g_%g.png' % (idx, time)
for idx, time in itertools.product([0], t_steps)]
view2 = [os.path.join(folder, invfolder)+'_view%g_%g.png' % (idx, time)
for idx, time in itertools.product([1], t_steps)]
m_tildes_str1 = ','.join(view1)
m_tildes_str2 = ','.join(view2)
with open(os.path.join(folder,'PV_INV_select.tex'), "w") as file:
file.write("\\newcommand{\TimesA}{%s}\n" % ','.join(staggered_list(view1,
view2)))
file.write("\\newcommand{\TimesB}{%s}\n" % m_tildes_str1)
file.write("\\newcommand{\TimesC}{%s}\n" % m_tildes_str2)
file.write("\\newcommand{\cmin}{%s}\n" % all_dataRange[0])
file.write("\\newcommand{\cmax}{%s}\n" % all_dataRange[1])
if PV_plot_dict['area']:
file.write("\\newcommand{\PeakStress}{%g}\n" % PeakStress)
file.write("\\newcommand{\\fracStress}{%g}\n" % fracStress)
file.write("\\newcommand{\percOfPeak}{%g}\n" % percOfPeak)
file.write("\\newcommand{\\fracOnsetHours}{%g}\n" % PV_plot_dict['frac_onset_hours'])
if PV_plot_dict['area'] and PV_plot_dict['len_dia'] and PV_plot_dict['LVDT_a'] and PV_plot_dict['LVDT_r']:
PVdata[[PV_plot_dict['PVcol'],'Stress (MPa)', 'refIndex','hours', 'strainAx', 'strainVol']].\
to_csv(os.path.join(folder, 'PVdata.csv'))
elif PV_plot_dict['area']:
PVdata[[PV_plot_dict['PVcol'],'Stress (MPa)', 'refIndex','hours']].to_csv(os.path.join(folder, 'PVdata.csv'))
else:
PVdata[[PV_plot_dict['PVcol'],'refIndex','hours']].to_csv(os.path.join(folder, 'PVdata.csv'))
return all_dataRange
def genRefMesh(m_tildes, pathRef='cly.Mesh', pathGen='baseMesh'):
'''generates base mesh vtu containing m_tilde data.
Parameters
----------
m_tildes : Array of floats
No. of cells by no of time-steps.
pathRef : str (default is 'cly.Mesh').
Path to .Mesh file.
pathGen : str (default is 'baseMesh').
Path to store the baseMesh.vtu.
Returns
-------
mesh : pyvista class
The ``m_tilde data`` cast onto the ``pathRef`` mesh.
all_dataRange : list
The range [min, max] data range over all time-steps.
'''
import mesh as msh
import pyvista as pv
# Read in the mesh
clyMesh = msh.utilities.meshOjfromDisk(meshObjectPath=pathRef)
# Save the base mesh
for idx,col in enumerate(m_tildes.T):
if col[0]>0:
break
clyMesh.setCellsVal(m_tildes.T[idx])
clyMesh.saveMesh(pathGen)
# Read in base mesh
mesh = pv.read(pathGen+".vtu")
# Deal With padded zeros
pad = np.argwhere(mesh.cell_arrays['gmsh:physical']>0)[0][0]
all_dataRange = [0,0]
# Add each time-step to the mesh and determine the data range
for time, m_tilde in enumerate(m_tildes.T):
mesh.cell_arrays['time%g' % time] = np.pad(m_tilde, (pad, 0), 'constant')
if mesh.cell_arrays['time%g' % time].min() < all_dataRange[0]:
all_dataRange[0] = mesh.cell_arrays['time%g' % time].min()
if mesh.cell_arrays['time%g' % time].max() > all_dataRange[1]:
all_dataRange[1] = mesh.cell_arrays['time%g' % time].max()
return mesh, all_dataRange
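# --- Hedged usage sketch (not part of the original module) ---
# Assumes a 'cly.Mesh' object exists on disk and that `m_tildes` has one row
# per mesh cell and one column per time-step; nothing here runs on import.
def _example_genRefMesh(m_tildes):
    mesh, all_dataRange = genRefMesh(m_tildes, pathRef='cly.Mesh', pathGen='baseMesh')
    print('global colour range across all time-steps:', all_dataRange)
    return mesh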
def movieFrames(outFolder = "Frac_rotate", add_vtk=None, delta_deg = 5, res = 3,
meshObjectPath='cly.Mesh', trim=True):
    '''Produces frames intended for the construction of a movie. Currently the
    function only supports rotating the scene about the z-axis.
Parameters
----------
outFolder : str (default='Frac_rotate')
The name of the output folder
add_vtk : list(mesh) (default=None)
A list of the mesh files to add to the domain
delta_deg : int (default=5)
rotation delta in degrees
res : int (default=3)
        Resolution multiplier
meshObjectPath : str (default='cly.Mesh')
Path to the base mesh file.
trim : bool (default=True)
Trim white space from each png produced.
'''
import pyvista as pv
import mesh as msh
# Load mesh object
clyMesh = msh.utilities.meshOjfromDisk(meshObjectPath)
clyMesh.saveMesh('baseMesh')
# Read in base mesh
core = pv.read('baseMesh.vtu')
# Make output folder
os.makedirs(outFolder, exist_ok=True)
p = pv.Plotter(off_screen=True, window_size=[600*res, 800*res])
p.add_mesh(core, lighting=True, opacity=0.1)
if add_vtk:
for mesh in add_vtk:
p.add_mesh(mesh,lighting=True)
p.isometric_view()
p.set_background("white")
p.remove_scalar_bar()
# p.show_bounds(color='black',font_size=150)
increments = 360//delta_deg
for turn in range(increments):
if add_vtk:
for mesh in add_vtk:
mesh.rotate_z(delta_deg)
core.rotate_z(delta_deg)
        file_name = os.path.join(outFolder, "rotate_%g.png" % turn)
p.screenshot(file_name)
# ----------------- trim whitespace -----------------
if trim:
        utilities.whiteSpaceTrim(folderName=outFolder, file_match='rotate_'+'*.png')
def whiteSpaceTrim(folderName='', file_match='*.png'):
'''Trims the white space from images.
Parameters
----------
folderName : str (default='')
Path to folder within which target images are held
file_match : str (default='*.png')
'''
from PIL import Image
import glob
import os
from PIL import ImageOps
files = glob.glob(os.path.join(folderName, file_match))
print(files)
for file in files:
image=Image.open(file)
image.load()
imageSize = image.size
# remove alpha channel
invert_im = image.convert("RGB")
# invert image (so that white is 0)
invert_im = ImageOps.invert(invert_im)
imageBox = invert_im.getbbox()
cropped=image.crop(imageBox)
print (file, "Size:", imageSize, "New Size:", imageBox)
cropped.save(file)
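# --- Hedged sketch (not part of the original module) ---
# Minimal illustration of the invert-then-getbbox trick used above, on a
# synthetic white canvas containing a black square; no files are touched.
def _example_bbox_trim():
    from PIL import Image, ImageOps
    canvas = Image.new("RGB", (100, 100), "white")
    for x in range(40, 60):
        for y in range(40, 60):
            canvas.putpixel((x, y), (0, 0, 0))
    box = ImageOps.invert(canvas).getbbox()  # bounding box of non-white content
    return canvas.crop(box).size             # -> (20, 20)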
class decorrTheory():
    '''Calculate the theoretical decorrelation coefficient between each source
    receiver pair, based on the Coda-Wave Diffusion method (Rossetto2011).
Parameters
----------
src_rec : list(tuples)
List of each source-receiver pair.
channels : DataFrame
Index of channel numbers and the corresponding x,y,z (columns) positions
corresponding to the 'src' and corresponding 'rec' numbers. Units should be
according to the physical parameters of ``D`` and ``sigma``.
X_b : array(x,y,z)
Location coords of perturbation within model domain in SI units.
t : float or list(float)
Centre time of the correlation windows with which the comparison is to be made.
param : dict
Expected parameters of medium, for example ``D`` (diffusion coefficient) and
``sigma`` (scattering cross-section).
mesh_obj : object
Mesh object generated from the ``mesh`` class.
srRx_df : DataFrame
Calculated based on the input ``src_rec``, ``channels``, and ``X_b`` data.
Multi-index df of s,r, and R, and Kt columns for each src, rec, tetraNo.
G : np.array
        Matrix G with len(src_rec) * len(t) rows.
Notes
-----
'''
def __init__(self,src_rec, channels, X_b, t, param, mesh_obj=None):
self.src_rec = src_rec
self.channels = channels
self.X_b = X_b
self.t = t
self.param = param
self.mesh_obj = mesh_obj
self.srRx_df = None
self.G = None
def Kt_run(self):
'''Run sequence of functions to produce the required inputs for inversion.
Note
----
The theoretical decorrelation coefficient will be calculated for each
``self.t`` provided, with the result appended to the matrix self.G.
'''
# Generate the inputs for each source receiver pair
self.src_rec_srR()
        # Calculate the sensitivity kernel values for each cell.
for idx, _ in enumerate(self.t):
# The kernel
self.Kt(t_no=idx)
# make the matrix
self.Kt_mat()
# store the matrix
utilities.HDF5_data_save('CWD_Inversion.h5', 'Inversion', 'G',
self.G, {'src_rec': self.src_rec, **self.param})
def Kt(self, t_no=0):
'''The theoretical decorrelation coefficient between a single source-receiver
pair.
Parameters
----------
        t_no : int (Default=0)
            Index into ``self.t`` selecting the window centre time at which the
            kernel is calculated
Notes
-----
The '_b' notation signifies a vector in cartesian coordinates
'''
try:
t = self.t[t_no]
except IndexError:
t = self.t
        # should perform for all S/R pairs in matrix notation.
self.srRx_df['Kt'] = self.param['c']*self.param['sigma']/2 * \
1/(4 * np.pi * self.param['D']) * \
(1/self.srRx_df.s + 1/self.srRx_df.r) * np.exp((self.srRx_df.R**2 - \
(self.srRx_df.s + self.srRx_df.r)**2)/(4 * self.param['D'] * t))
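    # Hedged sketch (not part of the original class): scalar form of the
    # vectorised kernel above, useful for sanity-checking a single
    # (s, r, R) triple against ``self.param``; ``t`` is one window centre time.
    def Kt_scalar(self, s, r, R, t):
        return self.param['c'] * self.param['sigma'] / 2 * \
            1 / (4 * np.pi * self.param['D']) * (1 / s + 1 / r) * \
            np.exp((R**2 - (s + r)**2) / (4 * self.param['D'] * t))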
def Kt_mat(self):
'''Construct the kernel matrix or rows for each source/receiver pair and
columns for each tet. cell centre.
Notes
-----
'''
        # should perform for all S/R pairs in matrix notation.
if self.G is None:
self.G = self.srRx_df.Kt.unstack(level=[2]).values
else:
self.G = np.append(self.G, self.srRx_df.Kt.unstack(level=[2]).values, axis=0)
def src_rec_srR(self):
'''Calculates the corresponding ``s``, ``r``, and ``R`` for all source
receiver pairs found in ``self.src_rec``.
s = |S_b-X_b|
r = |r_b-X_b|
R = |S_b-r_b|
'''
# List of src and rec
src = [src[0] for src in self.src_rec]
rec = [rec[1] for rec in self.src_rec]
# Create df with the src rec positions used (i.e. found in self.src_rec)
        srR_df = pd.DataFrame({'src': src, 'rec': rec})
import pandas as pd
from collections import defaultdict
from urllib.parse import urlparse
import math
df = pd.read_csv('Final_newData_withFeatures.csv')
urls = df['0']
entropies = []
for index, url in enumerate(urls):
domain=""
if url[:4] == 'http':
domain = urlparse(url).netloc
else:
domain = urlparse('http://'+url).netloc
entropy = 0
str_len = len(domain)
chars = defaultdict(int)
for char in domain:
chars[char] += 1
for char in domain:
pj = (chars[char]/str_len)
entropy += pj*math.log(pj,2)
entropies.append((-1)*entropy)
df['6'] = pd.Series(entropies)
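# --- Hedged sketch (not part of the original script) ---
# The loop above is plain Shannon entropy over a hostname's character
# frequencies; this helper reproduces the same calculation for one domain.
def _domain_entropy(domain):
    counts = defaultdict(int)
    for char in domain:
        counts[char] += 1
    n = len(domain)
    return -sum((c / n) * math.log(c / n, 2) for c in counts.values())
# _domain_entropy('example.com') is roughly 3.1 bits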
#!/usr/bin/env python
# coding: utf-8
# import libraries
import numpy as np
import pandas as pd
import streamlit as st
import plotly as pt
import matplotlib.pyplot as plt
from collections import Counter
import seaborn as sns
#import pandas_profiling as pf
import plotly.express as px
import plotly.graph_objects as go
sns.set_style("darkgrid")
# ignore warnings
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
# my app’s title
#st.title('Ongo-Total Run Experience Subscription Enhancement')
#st.markdown("""# **OngoBoost-Total Run Experience Subscription Enhancement**""")
st.markdown("""
<style>
body{
#color:;
background-color: #E4F2FE;
}
</style>
""",unsafe_allow_html=True)
#st.markdown("""# ** **""")#ff8c69
st.markdown("<h1 style='text-align: center; color: ;'><b>OngoBoost: Subscribe Today Run Tomorrow!</b></h1>", unsafe_allow_html=True)
st.markdown("<h3 style='text-align: left; color: ;'><b></b></h3>", unsafe_allow_html=True)
#st.markdown("<h3 style='text-align: left; color: ;'><b></b></h3>", unsafe_allow_html=True)
#st.title("OngoBoost-Subscribe Today Run Tomorrow")
#st.markdown("<h1 style='text-align: center; color: red;'></h1>", unsafe_allow_html=True)
#st.markdown(<style>h1{color: red}='text-align: center; color: red;'>, unsafe_allow_html=True)
#st.header("Upload New Users")
st.markdown("<h4 style='text-align: left; color: ;'><b>Upload New Users</b></h4>", unsafe_allow_html=True)
upload_flag = st.radio("", ("Yes, upload new user data", "No, use preloaded data"), index=1)
if upload_flag=="Yes, upload new user data":
csv_file = st.file_uploader(label="", type=["csv"], encoding="utf-8")#Upload a CSV file
if csv_file is not None:
data = pd.read_csv(csv_file)
#if st.checkbox("Show data"):
st.dataframe(data)
else:
def get_data():
#url = r"test_streamlit.csv"
#path = '/Users/sayantan/Desktop/test_streamlit.csv'
path = 'test_streamlit.csv'
return pd.read_csv(path)
data = get_data()
st.dataframe(data.head())
#try:
num_col = ['metric_started_app_session_week1','metric_started_app_session_week2',
'metric_started_app_session_week3','metric_started_app_session_week4',
'converted_to_enrolled_program', 'converted_to_started_session',
'converted_to_completed_session','converted_to_started_subscription']
data = data[num_col]
data_cols = data.columns
data_index = data.index
import joblib  # sklearn.externals.joblib has been removed in newer scikit-learn releases
knni = joblib.load('knni_imputer.joblib')
data = knni.transform(data)
data = pd.DataFrame(data=data,index=data_index,columns=data_cols)
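# --- Hedged sketch (not part of the original app) ---
# The transform above returns a bare ndarray, hence the DataFrame rebuild; a
# scikit-learn KNNImputer round-trip that keeps the labels would look like this.
def _impute_roundtrip(df, n_neighbors=5):
    from sklearn.impute import KNNImputer
    imputer = KNNImputer(n_neighbors=n_neighbors)
    return pd.DataFrame(imputer.fit_transform(df), index=df.index, columns=df.columns)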
import numpy as np
import pandas as pd
import h5py
import os
import math
import pickle
from datetime import timedelta
from modules.image_processor import cart2polar
def remove_outlier_and_nan(numpy_array, upper_bound=1000):
numpy_array = np.nan_to_num(numpy_array, copy=False)
numpy_array[numpy_array > upper_bound] = 0
VIS = numpy_array[:, :, :, 2]
VIS[VIS > 1] = 1 # VIS channel ranged from 0 to 1
return numpy_array
def flip_SH_images(info_df, image_matrix):
SH_idx = info_df.index[info_df.region == 'SH']
image_matrix[SH_idx] = np.flip(image_matrix[SH_idx], 1)
return image_matrix
def mark_good_quality_VIS(label_df, image_matrix):
tmp_df = pd.DataFrame(columns=['vis_mean', 'vis_std'])
for i in range(image_matrix.shape[0]):
VIS_matrix = image_matrix[i, :, :, 2]
tmp_df.loc[i] = [VIS_matrix.mean(), VIS_matrix.std()]
tmp_df['hour'] = label_df.apply(lambda x: x.local_time.hour, axis=1)
return tmp_df.apply(
lambda x: (0.1 <= x.vis_mean <= 0.7) and (0.1 <= x.vis_std <= 0.31) and (7 <= x.hour <= 16),
axis=1
)
def fix_reversed_VIS(image_matrix):
def scale_to_0_1(matrix):
out = matrix - matrix.min()
tmp_max = out.max()
if tmp_max != 0:
out /= tmp_max
return out
for i in range(image_matrix.shape[0]):
IR1_matrix = image_matrix[i, :, :, 0]
VIS_matrix = image_matrix[i, :, :, 2]
reversed_VIS_matrix = 1 - VIS_matrix
VIS_IR1_distance = abs(scale_to_0_1(IR1_matrix) - scale_to_0_1(VIS_matrix)).mean()
reversed_VIS_IR1_distance = abs(scale_to_0_1(IR1_matrix) - scale_to_0_1(reversed_VIS_matrix)).mean()
if reversed_VIS_IR1_distance > VIS_IR1_distance:
VIS_matrix *= -1
VIS_matrix += 1
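                # the in-place ops act on a view of image_matrix, so the
                # reversal fix propagates to the caller's array without a return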
def get_minutes_to_noon(local_time):
minutes_in_day = 60 * local_time.hour + local_time.minute
noon = 60 * 12
return abs(noon - minutes_in_day)
def extract_label_and_feature_from_info(info_df):
# --- region feature ---
info_df['region_code'] = pd.Categorical(info_df.region).codes
info_df['lon'] = (info_df.lon+180) % 360 - 180 # calibrate longitude, ex: 190 -> -170
# --- time feature ---
info_df['GMT_time'] = pd.to_datetime(info_df.time, format='%Y%m%d%H')
info_df['local_time'] = info_df.GMT_time \
+ info_df.apply(lambda x: timedelta(hours=x.lon/15), axis=1)
# --- year_day ---
SH_idx = info_df.index[info_df.region == 'SH']
info_df['yday'] = info_df.local_time.apply(lambda x: x.timetuple().tm_yday)
info_df.loc[SH_idx, 'yday'] += 365 / 2 # TC from SH
info_df['yday_transform'] = info_df.yday.apply(lambda x: x / 365 * 2 * math.pi)
info_df['yday_sin'] = info_df.yday_transform.apply(lambda x: math.sin(x))
info_df['yday_cos'] = info_df.yday_transform.apply(lambda x: math.cos(x))
# --- hour ---
info_df['hour_transform'] = info_df.apply(lambda x: x.local_time.hour / 24 * 2 * math.pi, axis=1)
info_df['hour_sin'] = info_df.hour_transform.apply(lambda x: math.sin(x))
info_df['hour_cos'] = info_df.hour_transform.apply(lambda x: math.cos(x))
# split into 2 dataframe
label_df = info_df[['region', 'ID', 'local_time', 'Vmax', 'R34', 'MSLP', 'valid_profile']]
feature_df = info_df[['lon', 'lat', 'region_code', 'yday_cos', 'yday_sin', 'hour_cos', 'hour_sin']]
return label_df, feature_df
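# --- Hedged sketch (not part of the original module) ---
# The sin/cos pairs above are a cyclical encoding: hour 23 and hour 0 map to
# neighbouring points on the unit circle instead of being 23 units apart.
def _cyclical_encode(value, period):
    angle = value / period * 2 * math.pi
    return math.sin(angle), math.cos(angle)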
def data_cleaning_and_organizing(images, info_df):
images = remove_outlier_and_nan(images)
images = flip_SH_images(info_df, images)
fix_reversed_VIS(images)
label_df, feature_df = extract_label_and_feature_from_info(info_df)
feature_df['minutes_to_noon'] = label_df['local_time'].apply(get_minutes_to_noon)
feature_df['is_good_quality_VIS'] = mark_good_quality_VIS(label_df, images)
return images, label_df, feature_df
def data_split(images, label_df, feature_df, structure_profiles, phase):
if phase == 'train':
target_index = label_df.index[label_df.ID < '2015000']
elif phase == 'valid':
target_index = label_df.index[np.logical_and('2017000' > label_df.ID, label_df.ID > '2015000')]
elif phase == 'test':
target_index = label_df.index[label_df.ID > '2017000']
return {
'label': label_df.loc[target_index].reset_index(drop=True),
'feature': feature_df.loc[target_index].reset_index(drop=True),
'image': images[target_index],
'profile': structure_profiles[target_index]
}
def extract_features_from_raw_file(file_path, coordinate):
# if not os.path.isfile(file_path):
# print(f'file {file_path} not found! try to download it!')
# download_data(data_folder)
with h5py.File(file_path, 'r') as hf:
images = hf['images'][:]
structure_profiles = hf['structure_profiles'][:]
# collect info from every file in the list
    info_df = pd.read_hdf(file_path, key='info', mode='r')
from sklearn import svm, datasets
import sklearn.model_selection as model_selection
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
import numpy as np
import matplotlib.pyplot as plt
import glob
import cv2
import os
import seaborn as sns
import pandas as pd
import sys
from skimage.filters import sobel
from skimage.feature import graycomatrix, graycoprops, local_binary_pattern
from sklearn import preprocessing
import timeit
from skimage.measure import shannon_entropy
import scipy.stats as fo
from skimage.filters import gabor
from radiomics import glcm
# Resize images to SIZE x SIZE pixels
SIZE = 300
# NumPy array (explanation)
# The main benefits of using NumPy arrays should be smaller memory consumption and better runtime behavior.
def train_set():
# Imagens de treino e suas classes
train_images = []
train_labels = []
train_medias = []
media_RGB = []
    # Walk the training folder, collecting the photos from each class folder
for directory_path in glob.glob("amendoas/train_/*"):
label = directory_path.split("\\")[-1]
for img_path in glob.glob(os.path.join(directory_path, "*.jpg")):
#
# imagem_colorida = cv2.imread(img_path)
# # train_medias.append(cv2.mean(imagem_colorida))
# imagem_colorida = np.array(imagem_colorida, dtype=float)
# imagem_colorida[imagem_colorida == 0] = np.nan
#
# train_medias.append(np.nanmean(imagem_colorida, axis=(0, 1)))
media_RGB = cv2.mean(cv2.imread(img_path))
train_medias.append(media_RGB)
            img = cv2.imread(img_path, 0)  # read the image as grayscale (flag 0)
img = cv2.resize(img, (SIZE, SIZE)) # Resize images
train_images.append(img)
train_labels.append(label)
# print(media_RGB)
# media_cinza = cv2.mean(img)[0]
#
# media_RGB[0] += media_cinza
# media_RGB[1] += media_cinza
# media_RGB[2] += media_cinza
# print(media_RGB)
    # Full collection of training photos and classes as NumPy arrays
return np.array(train_images), np.array(train_labels), np.array(train_medias)
def test_set():
    # Test images and their classes
test_images = []
test_labels = []
test_medias = []
media_RGB = []
    # Walk the test folder, collecting the photos from each class folder
for directory_path in glob.glob("amendoas/test_/*"):
fruit_label = directory_path.split("\\")[-1]
for img_path in glob.glob(os.path.join(directory_path, "*.jpg")):
#
# imagem_colorida = cv2.imread(img_path)
# # train_medias.append(cv2.mean(imagem_colorida))
# imagem_colorida = np.array(imagem_colorida, dtype=float)
# imagem_colorida[imagem_colorida == 0] = np.nan
# test_medias.append(np.nanmean(imagem_colorida, axis=(0, 1)))
media_RGB = cv2.mean(cv2.imread(img_path))
test_medias.append(media_RGB)
img = cv2.imread(img_path, 0)
img = cv2.resize(img, (SIZE, SIZE)) # Resize images
test_images.append(img)
test_labels.append(fruit_label)
# print(media_RGB)
#
# media_cinza = cv2.mean(img)[0]
#
# media_RGB[0] += media_cinza
# media_RGB[1] += media_cinza
# media_RGB[2] += media_cinza
# print(media_RGB)
    # Full collection of test photos and classes as NumPy arrays
return np.array(test_images), np.array(test_labels), np.array(test_medias)
def pre_processing(le, train_labels, test_labels):
    # Encode the labels as integer values between 0 and n_classes - 1
    # for both the training and the test sets
le.fit(train_labels)
train_labels_encoded = le.transform(train_labels)
le.fit(test_labels)
test_labels_encoded = le.transform(test_labels)
return train_labels_encoded, test_labels_encoded
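# --- Hedged sketch (not part of the original script) ---
# Minimal round-trip of the label encoding performed above.
def _label_encode_demo():
    labels = np.array(["good", "bad", "good", "broken"])
    le = preprocessing.LabelEncoder()
    codes = le.fit_transform(labels)    # alphabetical integer codes, e.g. [2, 0, 2, 1]
    return le.inverse_transform(codes)  # back to the original strings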
###################################################################
# FEATURE EXTRACTOR function
# input shape is (n, x, y, c) - number of images, x, y, and channels
def feature_extractor(dataset, medias):
    image_dataset = pd.DataFrame()
import pandas as pd
import numpy as np
import datetime
import sys
import time
import xgboost as xgb
from add_feture import *
FEATURE_EXTRACTION_SLOT = 10
LabelDay = datetime.datetime(2014,12,18,0,0,0)
Data = pd.read_csv("../../../../data/fresh_comp_offline/drop1112_sub_item.csv")
Data['daystime'] = Data['days'].map(lambda x: time.strptime(x, "%Y-%m-%d")).map(lambda x: datetime.datetime(*x[:6]))
def get_train(train_user,end_time):
    # Take the records from the day before the label day as the records to be labelled
data_train = train_user[(train_user['daystime'] == (end_time-datetime.timedelta(days=1)))]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
    # Remove duplicate samples from the training records
data_train = data_train.drop_duplicates(['user_id', 'item_id'])
data_train_ui = data_train['user_id'] / data_train['item_id']
# print(len(data_train))
    # Use the actual purchases on the label day to assign labels
data_label = train_user[train_user['daystime'] == end_time]
data_label_buy = data_label[data_label['behavior_type'] == 4]
data_label_buy_ui = data_label_buy['user_id'] / data_label_buy['item_id']
    # Label the previous day's interaction records
data_train_labeled = data_train_ui.isin(data_label_buy_ui)
dict = {True: 1, False: 0}
data_train_labeled = data_train_labeled.map(dict)
data_train['label'] = data_train_labeled
return data_train[['user_id', 'item_id','item_category', 'label']]
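# --- Hedged sketch (not part of the original script) ---
# Toy illustration of the labelling idea above: match (user, item) pairs seen
# on the feature day against the pairs actually bought on the label day.
def _label_demo():
    feat = pd.DataFrame({'user_id': [1, 1, 2], 'item_id': [10, 11, 10]})
    bought = pd.DataFrame({'user_id': [1], 'item_id': [11]})
    key = lambda d: d['user_id'].astype(str) + '_' + d['item_id'].astype(str)
    feat['label'] = key(feat).isin(key(bought)).astype(int)
    return feat  # only the (1, 11) row gets label 1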
def get_label_testset(train_user,LabelDay):
    # The test set is all interaction data from the previous day
data_test = train_user[(train_user['daystime'] == LabelDay)]#&((train_user.behavior_type==3)|(train_user.behavior_type==2))
data_test = data_test.drop_duplicates(['user_id', 'item_id'])
return data_test[['user_id', 'item_id','item_category']]
def item_category_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_category,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_category,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_category,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_category,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_category_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,countAverage,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,buyRate,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_category_feture = pd.merge(item_category_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_category_feture = pd.merge(item_category_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_category_feture.fillna(0,inplace=True)
return item_category_feture
def item_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
item_count = pd.crosstab(data.item_id,data.behavior_type)
item_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
item_count_before5 = pd.crosstab(beforefiveday.item_id,beforefiveday.behavior_type)
item_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
item_count_before_3 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
item_count_before_2 = pd.crosstab(beforethreeday.item_id,beforethreeday.behavior_type)
item_count_unq = data.groupby(by = ['item_id','behavior_type']).agg({"user_id":lambda x:x.nunique()});item_count_unq = item_count_unq.unstack()
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayitem_count = pd.crosstab(beforeoneday.item_id,beforeoneday.behavior_type)
countAverage = item_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = item_count[1]/item_count[4]
buyRate['skim'] = item_count[2]/item_count[4]
buyRate['collect'] = item_count[3]/item_count[4]
buyRate.index = item_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = item_count_before5[1]/item_count_before5[4]
buyRate_2['skim'] = item_count_before5[2]/item_count_before5[4]
buyRate_2['collect'] = item_count_before5[3]/item_count_before5[4]
buyRate_2.index = item_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = item_count_before_3[1]/item_count_before_3[4]
buyRate_3['skim'] = item_count_before_3[2]/item_count_before_3[4]
buyRate_3['collect'] = item_count_before_3[3]/item_count_before_3[4]
buyRate_3.index = item_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
item_id_feture = pd.merge(item_count,beforeonedayitem_count,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,countAverage,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,buyRate,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_unq,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before5,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_3,how='left',right_index=True,left_index=True)
item_id_feture = pd.merge(item_id_feture,item_count_before_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# item_id_feture = pd.merge(item_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
item_id_feture.fillna(0,inplace=True)
return item_id_feture
def user_id_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_count = pd.crosstab(data.user_id,data.behavior_type)
user_count_before5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
user_count_before5 = pd.crosstab(beforefiveday.user_id,beforefiveday.behavior_type)
user_count_before_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
user_count_before_3 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
user_count_before_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
user_count_before_2 = pd.crosstab(beforethreeday.user_id,beforethreeday.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
beforeonedayuser_count = pd.crosstab(beforeoneday.user_id,beforeoneday.behavior_type)
countAverage = user_count/FEATURE_EXTRACTION_SLOT
buyRate = pd.DataFrame()
buyRate['click'] = user_count[1]/user_count[4]
buyRate['skim'] = user_count[2]/user_count[4]
buyRate['collect'] = user_count[3]/user_count[4]
buyRate.index = user_count.index
buyRate_2 = pd.DataFrame()
buyRate_2['click'] = user_count_before5[1]/user_count_before5[4]
buyRate_2['skim'] = user_count_before5[2]/user_count_before5[4]
buyRate_2['collect'] = user_count_before5[3]/user_count_before5[4]
buyRate_2.index = user_count_before5.index
buyRate_3 = pd.DataFrame()
buyRate_3['click'] = user_count_before_3[1]/user_count_before_3[4]
buyRate_3['skim'] = user_count_before_3[2]/user_count_before_3[4]
buyRate_3['collect'] = user_count_before_3[3]/user_count_before_3[4]
buyRate_3.index = user_count_before_3.index
buyRate = buyRate.replace([np.inf, -np.inf], 0)
buyRate_2 = buyRate_2.replace([np.inf, -np.inf], 0)
buyRate_3 = buyRate_3.replace([np.inf, -np.inf], 0)
long_online = pd.pivot_table(beforeoneday,index=['user_id'],values=['hours'],aggfunc=[np.min,np.max,np.ptp])
user_id_feture = pd.merge(user_count,beforeonedayuser_count,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,countAverage,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,buyRate,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before5,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before_3,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,user_count_before_2,how='left',right_index=True,left_index=True)
user_id_feture = pd.merge(user_id_feture,long_online,how='left',right_index=True,left_index=True)
# user_id_feture = pd.merge(user_id_feture,buyRate_2,how='left',right_index=True,left_index=True)
# user_id_feture = pd.merge(user_id_feture,buyRate_3,how='left',right_index=True,left_index=True)
user_id_feture.fillna(0,inplace=True)
return user_id_feture
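# --- Hedged sketch (not part of the original script) ---
# pd.crosstab, used heavily above, simply counts behaviour types per id.
def _crosstab_demo():
    toy = pd.DataFrame({'user_id': [1, 1, 2, 2, 2],
                        'behavior_type': [1, 4, 1, 1, 3]})
    return pd.crosstab(toy.user_id, toy.behavior_type)
    # one row per user, one column per behaviour type, cells hold counts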
def user_item_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_item_count = pd.crosstab([data.user_id,data.item_id],data.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
user_item_count_5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5+2)]
user_item_count_5 = pd.crosstab([beforefiveday.user_id,beforefiveday.item_id],beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=end_time-datetime.timedelta(days=5)]
user_item_count_5 = pd.crosstab([beforefiveday.user_id,beforefiveday.item_id],beforefiveday.behavior_type)
user_item_count_3=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3+2)]
user_item_count_3 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=3)]
user_item_count_3 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
user_item_count_2=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7+2)]
user_item_count_2 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=end_time-datetime.timedelta(days=7)]
user_item_count_2 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_id],beforethreeday.behavior_type)
beforeonedayuser_item_count = pd.crosstab([beforeoneday.user_id,beforeoneday.item_id],beforeoneday.behavior_type)
# _live = user_item_long_touch(data)
max_touchtime = pd.pivot_table(beforeoneday,index=['user_id','item_id'],values=['hours'],aggfunc=[np.min,np.max])
max_touchtype = pd.pivot_table(beforeoneday,index=['user_id','item_id'],values=['behavior_type'],aggfunc=np.max)
user_item_feture = pd.merge(user_item_count,beforeonedayuser_item_count,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,max_touchtime,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,max_touchtype,how='left',right_index=True,left_index=True)
# user_item_feture = pd.merge(user_item_feture,_live,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,user_item_count_5,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,user_item_count_3,how='left',right_index=True,left_index=True)
user_item_feture = pd.merge(user_item_feture,user_item_count_2,how='left',right_index=True,left_index=True)
user_item_feture.fillna(0,inplace=True)
return user_item_feture
def user_cate_feture(data,end_time,beforeoneday):
# data = Data[(Data['daystime']<LabelDay) & (Data['daystime']>LabelDay-datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))]
user_item_count = pd.crosstab([data.user_id,data.item_category],data.behavior_type)
# beforeoneday = Data[Data['daystime'] == LabelDay-datetime.timedelta(days=1)]
user_cate_count_5=None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforefiveday = data[data['daystime']>=(end_time-datetime.timedelta(days=5+2))]
user_cate_count_5 = pd.crosstab([beforefiveday.user_id,beforefiveday.item_category],beforefiveday.behavior_type)
else:
beforefiveday = data[data['daystime']>=(end_time-datetime.timedelta(days=5))]
user_cate_count_5 = pd.crosstab([beforefiveday.user_id,beforefiveday.item_category],beforefiveday.behavior_type)
user_cate_count_3 = None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=(end_time-datetime.timedelta(days=3+2))]
user_cate_count_3 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_category],beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=(end_time-datetime.timedelta(days=3))]
user_cate_count_3 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_category],beforethreeday.behavior_type)
user_cate_count_2 = None
if (((end_time-datetime.timedelta(days=5))<datetime.datetime(2014,12,13,0,0,0))&((end_time-datetime.timedelta(days=5))>datetime.datetime(2014,12,10,0,0,0))):
beforethreeday = data[data['daystime']>=(end_time-datetime.timedelta(days=7+2))]
user_cate_count_2 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_category],beforethreeday.behavior_type)
else:
beforethreeday = data[data['daystime']>=(end_time-datetime.timedelta(days=7))]
user_cate_count_2 = pd.crosstab([beforethreeday.user_id,beforethreeday.item_category],beforethreeday.behavior_type)
# _live = user_cate_long_touch(data)
beforeonedayuser_item_count = pd.crosstab([beforeoneday.user_id,beforeoneday.item_category],beforeoneday.behavior_type)
max_touchtime = pd.pivot_table(beforeoneday,index=['user_id','item_category'],values=['hours'],aggfunc=[np.min,np.max])
max_touchtype = pd.pivot_table(beforeoneday,index=['user_id','item_category'],values=['behavior_type'],aggfunc=np.max)
user_cate_feture = pd.merge(user_item_count,beforeonedayuser_item_count,how='left',right_index=True,left_index=True)
user_cate_feture = pd.merge(user_cate_feture,max_touchtime,how='left',right_index=True,left_index=True)
user_cate_feture = pd.merge(user_cate_feture,max_touchtype,how='left',right_index=True,left_index=True)
# user_cate_feture = pd.merge(user_cate_feture,_live,how='left',right_index=True,left_index=True)
user_cate_feture = pd.merge(user_cate_feture,user_cate_count_5,how='left',right_index=True,left_index=True)
user_cate_feture = pd.merge(user_cate_feture,user_cate_count_3,how='left',right_index=True,left_index=True)
user_cate_feture = pd.merge(user_cate_feture,user_cate_count_2,how='left',right_index=True,left_index=True)
user_cate_feture.fillna(0,inplace=True)
return user_cate_feture
if __name__ == '__main__':
# pass
result=[]
for i in range(15):
train_user_window1 = None
if (LabelDay >= datetime.datetime(2014,12,12,0,0,0)):
train_user_window1 = Data[(Data['daystime'] > (LabelDay - datetime.timedelta(days=FEATURE_EXTRACTION_SLOT+2))) & (Data['daystime'] < LabelDay)]
else:
train_user_window1 = Data[(Data['daystime'] > (LabelDay - datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))) & (Data['daystime'] < LabelDay)]
# train_user_window1 = Data[(Data['daystime'] > (LabelDay - datetime.timedelta(days=FEATURE_EXTRACTION_SLOT))) & (Data['daystime'] < LabelDay)]
beforeoneday = Data[Data['daystime'] == (LabelDay-datetime.timedelta(days=1))]
# beforetwoday = Data[(Data['daystime'] >= (LabelDay-datetime.timedelta(days=2))) & (Data['daystime'] < LabelDay)]
# beforefiveday = Data[(Data['daystime'] >= (LabelDay-datetime.timedelta(days=5))) & (Data['daystime'] < LabelDay)]
x = get_train(Data, LabelDay)
add_user_click_1 = user_click(beforeoneday)
add_user_item_click_1 = user_item_click(beforeoneday)
add_user_cate_click_1 = user_cate_click(beforeoneday)
# add_user_click_2 = user_click(beforetwoday)
# add_user_click_5 = user_click(beforefiveday)
liveday = user_liveday(train_user_window1)
# sys.exit()
a = user_id_feture(train_user_window1, LabelDay,beforeoneday)
a = a.reset_index()
b = item_id_feture(train_user_window1, LabelDay,beforeoneday)
b = b.reset_index()
c = item_category_feture(train_user_window1, LabelDay,beforeoneday)
c = c.reset_index()
d = user_cate_feture(train_user_window1, LabelDay,beforeoneday)
d = d.reset_index()
e = user_item_feture(train_user_window1, LabelDay,beforeoneday)
e = e.reset_index()
x = pd.merge(x,a,on=['user_id'],how='left')
x = pd.merge(x,b,on=['item_id'],how='left')
x = pd.merge(x,c,on=['item_category'],how='left')
x = pd.merge(x,d,on=['user_id','item_category'],how='left')
x = pd.merge(x,e,on=['user_id','item_id'],how='left')
x = pd.merge(x,add_user_click_1,left_on = ['user_id'],right_index=True,how = 'left' )
# x = pd.merge(x,add_user_click_2,left_on = ['user_id'],right_index=True,how = 'left' )
# x = pd.merge(x,add_user_click_5,left_on = ['user_id'],right_index=True,how = 'left' )
x = pd.merge(x,add_user_item_click_1,left_on = ['user_id','item_id'],right_index=True,how = 'left' )
x = pd.merge(x,add_user_cate_click_1,left_on = ['user_id','item_category'],right_index=True,how = 'left' )
x = pd.merge(x,liveday,left_on = ['user_id'],right_index=True,how = 'left' )
x = x.fillna(0)
print(i,LabelDay,len(x))
LabelDay = LabelDay-datetime.timedelta(days=1)
if (LabelDay == datetime.datetime(2014,12,13,0,0,0)):
LabelDay = datetime.datetime(2014,12,10,0,0,0)
result.append(x)
train_set = pd.concat(result,axis=0,ignore_index=True)
train_set.to_csv('./train_train_no_jiagou.csv',index=None)
###############################################
LabelDay=datetime.datetime(2014,12,18,0,0,0)
test = get_label_testset(Data,LabelDay)
train_user_window1 = Data[(Data['daystime'] > (LabelDay - datetime.timedelta(days=FEATURE_EXTRACTION_SLOT-1))) & (Data['daystime'] <= LabelDay)]
beforeoneday = Data[Data['daystime'] == LabelDay]
# beforetwoday = Data[(Data['daystime'] >= (LabelDay-datetime.timedelta(days=2))) & (Data['daystime'] < LabelDay)]
# beforefiveday = Data[(Data['daystime'] >= (LabelDay-datetime.timedelta(days=5))) & (Data['daystime'] < LabelDay)]
add_user_click = user_click(beforeoneday)
add_user_item_click = user_item_click(beforeoneday)
add_user_cate_click = user_cate_click(beforeoneday)
# add_user_click_2 = user_click(beforetwoday)
# add_user_click_5 = user_click(beforefiveday)
liveday = user_liveday(train_user_window1)
a = user_id_feture(train_user_window1, LabelDay,beforeoneday)
a = a.reset_index()
b = item_id_feture(train_user_window1, LabelDay,beforeoneday)
b = b.reset_index()
c = item_category_feture(train_user_window1, LabelDay,beforeoneday)
c = c.reset_index()
d = user_cate_feture(train_user_window1, LabelDay,beforeoneday)
d = d.reset_index()
e = user_item_feture(train_user_window1, LabelDay,beforeoneday)
e = e.reset_index()
test = pd.merge(test,a,on=['user_id'],how='left')
test = pd.merge(test,b,on=['item_id'],how='left')
test = pd.merge(test,c,on=['item_category'],how='left')
    test = pd.merge(test,d,on=['user_id','item_category'],how='left')
import os
from pathlib import Path
import pandas as pd
import requests
class OisManager:
TOIS_CSV_URL = 'https://exofop.ipac.caltech.edu/tess/download_toi.php?sort=toi&output=csv'
CTOIS_CSV_URL = 'https://exofop.ipac.caltech.edu/tess/download_ctoi.php?sort=ctoi&output=csv'
KOIS_LIST_URL = 'https://exofop.ipac.caltech.edu/kepler/targets.php?sort=num-pc&page1=1&ipp1=100000&koi1=&koi2='
KIC_STAR_URL = 'https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=keplerstellar&select=kepid,dist'
KOI_CSV_URL = 'https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=cumulative'
EPIC_CSV_URL = 'https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=k2candidates&' \
'select=epic_name,epic_candname,k2c_disp,pl_orbper,st_dist,st_teff,st_logg,st_metfe,st_metratio,' \
'st_vsini,st_kep,pl_trandep,pl_trandur,pl_rade,pl_eqt,pl_orbincl,ra_str,dec_str,pl_tranmid'
TOIS_CSV = 'tois.csv'
CTOIS_CSV = 'ctois.csv'
KOIS_CSV = 'kois.csv'
EPIC_CSV = 'epic_ois.csv'
KIC_STAR_CSV = 'kic_star.csv'
ois = None
def __init__(self):
home = str(Path.home()) + "/.sherlockpipe/"
if not os.path.exists(home):
os.mkdir(home)
self.tois_csv = home + self.TOIS_CSV
self.ctois_csv = home + self.CTOIS_CSV
self.kois_csv = home + self.KOIS_CSV
self.epic_csv = home + self.EPIC_CSV
self.kic_star_csv = home + self.KIC_STAR_CSV
def load_ois(self):
if not os.path.isfile(self.tois_csv) or not os.path.isfile(self.ctois_csv):
print("TOIs files are not found. Downloading...")
self.update_tic_csvs()
print("TOIs files download is finished!")
toi_data = pd.read_csv(self.tois_csv)
ois = toi_data
ctoi_data = pd.read_csv(self.ctois_csv)
ois = pd.concat([ois, ctoi_data])
if not os.path.isfile(self.kois_csv):
print("KOIs files are not found. Downloading...")
self.update_kic_csvs()
print("KOIs files download is finished!")
koi_data = pd.read_csv(self.kois_csv)
ois = pd.concat([ois, koi_data])
if not os.path.isfile(self.epic_csv):
print("EPIC IDs files are not found. Downloading...")
self.update_epic_csvs()
print("EPIC IDs files download is finished!")
epic_data = pd.read_csv(self.epic_csv)
ois = pd.concat([ois, epic_data])
return ois
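    # --- Hedged usage sketch (not part of the original module) ---
    # manager = OisManager()
    # ois = manager.load_ois()   # downloads any missing CSVs (network access required)
    # print(ois.shape)           # one concatenated frame of TOIs, CTOIs, KOIs and EPIC OIs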
def update_tic_csvs(self):
tic_csv = open(self.tois_csv, 'wb')
request = requests.get(self.TOIS_CSV_URL)
tic_csv.write(request.content)
tic_csv.close()
tic_csv = open(self.ctois_csv, 'wb')
request = requests.get(self.CTOIS_CSV_URL)
tic_csv.write(request.content)
tic_csv.close()
        toi_data = pd.read_csv(self.tois_csv)
import numpy as np
import gdax
import json
import logging
from os.path import expanduser
import pandas as pd
from backfire.bots import bot_db
logger = logging.getLogger(__name__)
def load_gdax_auth(test_bool):
home = expanduser("~")
if test_bool == True:
gdax_auth = json.load(open(f'{home}/auth/gdax_sb'))
if test_bool == False:
gdax_auth = json.load(open(f'{home}/auth/gdax'))
key = gdax_auth['key']
secret = gdax_auth['secret']
passphrase = gdax_auth['passphrase']
return(key, secret, passphrase)
def initialize_gdax(test_bool):
key, secret, passphrase = load_gdax_auth(test_bool)
if test_bool == True:
logger.info("Initialize GDAX Sandbox API")
bot_db.db.my_db = 'gdax_test'
bot_db.db.set_engine()
ac = gdax.AuthenticatedClient(key, secret, passphrase,
api_url="https://api-public.sandbox.gdax.com")
if test_bool == False:
logger.info("Initialize live GDAX API")
bot_db.db.my_db = 'gdax'
bot_db.db.set_engine()
ac = gdax.AuthenticatedClient(key, secret, passphrase)
return(ac)
def gdax_get_orders(ac, uniq_orders):
order_list = []
for o in uniq_orders:
order = ac.get_order(o)
order_list.append(order)
return(order_list)
def update_orders(ac):
gdax_orders = ac.get_orders()
gdax_orders = [item for sublist in gdax_orders for item in sublist]
if len(gdax_orders) > 0:
orders_df = bot_db.prep_gdax_order_df(gdax_orders)
gdax_order_ids = orders_df['order_id'].tolist()
else:
gdax_order_ids = gdax_orders
sql_order_ids = bot_db.get_cur_orders()
new_order_ids = set(gdax_order_ids) - set(sql_order_ids['order_id'])
stale_order_ids = set(sql_order_ids['order_id']) - set(gdax_order_ids)
# Add new
if len(new_order_ids) > 0:
new_orders_df = orders_df[orders_df['order_id'].isin(new_order_ids)]
bot_db.append_if_new('order_id', new_orders_df, 'gdax_order_cur')
# Remove old
if len(stale_order_ids) > 0:
stale_hist = gdax_get_orders(ac, stale_order_ids)
stale_hist = pd.DataFrame(stale_hist)
stale_hist = bot_db.prep_gdax_order_df(stale_hist)
fills_df = get_gdax_fills(ac)
fills_df = add_bot_ids(fills_df)
bot_db.append_if_new('trade_id', fills_df, 'gdax_fill_hist')
bot_db.gdax_delete_open_orders(stale_order_ids, stale_hist)
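# --- Hedged sketch (not part of the original bot) ---
# The new/stale bookkeeping above is plain set arithmetic on order ids.
def _order_diff_demo():
    exchange_ids = {'a', 'b', 'c'}
    db_ids = {'b', 'c', 'd'}
    new_ids = exchange_ids - db_ids    # {'a'} -> insert into gdax_order_cur
    stale_ids = db_ids - exchange_ids  # {'d'} -> fetch final state, archive, delete
    return new_ids, stale_ids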
def update_gdax_transfers_manual(ac):
bot_id = 'manual'
signal_id = 'manual'
my_accounts = ac.get_accounts()
transfer_list = []
for i in my_accounts:
my_id = i['id']
my_cur = i['currency']
gdax_acc_hist = ac.get_account_history(my_id)
gdax_acc_hist = [item for sublist in gdax_acc_hist for item in sublist]
for d in gdax_acc_hist:
if d['type'] == 'transfer':
d['cur'] = my_cur
d = {**d, **d.pop('details', None)}
transfer_list.append(d)
transfer_df = pd.DataFrame(transfer_list)
transfer_df['signal_id'] = signal_id
transfer_df['bot_id'] = bot_id
transfer_df = transfer_df.rename(columns = {'amount': 'transfer_amt', 'id': 'trade_id'})
transfer_df = transfer_df[['transfer_amt', 'created_at', 'cur', 'trade_id', 'transfer_id', 'transfer_type', 'bot_id']]
transfer_df['created_at'] = pd.to_datetime(transfer_df['created_at'])
bot_db.append_if_new('transfer_id', transfer_df, 'gdax_transfer_hist')
def add_bot_ids(fills_df):
aff = bot_db.get_order_aff()
    fills_df = pd.merge(fills_df, aff, how='left', left_on='order_id', right_on='order_id')
#!/usr/bin/env python3
# coding: utf-8
import argparse
import json
import logging
import numpy as np
import os
import pandas as pd
import time
from dart_id.align import align
from dart_id.converter import process_files
from dart_id.exceptions import ConfigFileError
from dart_id.fido.BayesianNetwork import run_internal
from dart_id.helper import add_global_args, read_config_file, init_logger, load_params_from_file
from dart_id.models import models, get_model_from_config
from dart_id.report import generate_report
from scipy.stats import norm, lognorm, laplace, bernoulli, uniform
logger = logging.getLogger('root')
def update(dfa, params, config):
dfa = dfa.reset_index(drop=True)
#logger.info('{} / {} ({:.2%}) confident, alignable observations (PSMs) after filtering.'.format(dff.shape[0], dfa.shape[0], dff.shape[0] / dfa.shape[0]))
# refactorize peptide id into stan_peptide_id,
# to preserve continuity when feeding data into STAN
dfa['stan_peptide_id'] = dfa['sequence'].map({
ind: val
for val, ind in enumerate(dfa['sequence'].unique())
})
num_experiments = dfa['exp_id'].max() + 1
num_peptides = dfa['peptide_id'].max() + 1
exp_names = np.sort(dfa['raw_file'].unique())
pep_id_list = dfa['peptide_id'].unique()
# validate parameters file. make sure it is from the same filters
# or else the program will crash in the code below
# check num_experiments, num_peptides
if (
params['exp'].shape[0] != num_experiments or
params['peptide'].shape[0] != (dfa['stan_peptide_id'].max() + 1)
):
error_msg = 'Parameters files have different data than the input data provided. Ensure that both the input list and filters used to generate the alignment parameters and those provided to the current update are the __exact__ same.'
raise ConfigFileError(error_msg)
model = get_model_from_config(config)
# mu from the STAN alignment
dfa['mu'] = params['peptide']['mu'].values[dfa['stan_peptide_id']]
# Join transformation parameters (betas, sigmas)
dfa = (dfa
.join(params['exp'], on='exp_id', how='left', lsuffix='', rsuffix='_right')
.drop(columns='exp_id_right')
)
# predict mus with RTs, and RTs with aligned mus
dfa['mu_pred'] = model['rt_to_ref'](dfa, dfa['mu'], params)
dfa['muij'] = model['ref_to_rt'](dfa, dfa['mu'], params)
dfa['sigmaij'] = model['sigmaij_func'](dfa, params)
# scaled sigma is the same ratio of muij / mu applied to sigmaij
dfa['sigma_pred'] = dfa['sigmaij'] * dfa['mu_pred'] / dfa['muij']
# get parameters for the null distributions for each experiment
null_dists = dfa.groupby('exp_id')['retention_time'].agg([np.mean, np.std])
#null_dists = np.array([norm(loc=null_dists.loc[i, 'mean'], scale=null_dists.loc[i, 'std']) for i in range(0, num_experiments)])
# first column is mean, second is std
null_dists = np.array([null_dists['mean'].values, null_dists['std'].values]).T
# PEP ceiling at 1, otherwise will result in
# incorrect negative densities when plugging into Bayes' theorem
    dfa.loc[dfa['pep'] > 1.0, 'pep'] = 1.0
# output table
df_new = pd.DataFrame()
bootstrap_method = config['bootstrap_method'] if 'bootstrap_method' in config else None
if bootstrap_method == 'none':
bootstrap_method = None
if bootstrap_method is None:
logger.info('Bootstrap method not defined, using point estimates to update confidence instead.')
else:
logger.info('Using \"{}\" bootstrap method'.format(bootstrap_method))
bootstrap_iters = 20 # default
if 'bootstrap_iters' in config:
bootstrap_iters = config['bootstrap_iters']
if bootstrap_method is not None:
logger.info('Using {} bootstrap iterations'.format(bootstrap_iters))
# Calculate the number of observations per peptide
# Used to determine how many draws we need from our distributions
obs_per_peptide = (dfa
.groupby('stan_peptide_id')
.size()
)
max_obs_per_peptide = obs_per_peptide.max()
laplace_pool = laplace.rvs(size=(max_obs_per_peptide * bootstrap_iters))
uniform_pool = uniform.rvs(size=(max_obs_per_peptide * bootstrap_iters))
null_sample_pool = norm.rvs(size=(max_obs_per_peptide * bootstrap_iters))
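        # The three pools above are drawn once up front and then sliced and reshaped
        # per peptide (to bootstrap_iters x num_obs) inside the loop below; this is a
        # speed choice that avoids calling scipy's .rvs() separately for every peptide.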
# Group all data by peptide
dfe = dfa.loc[:, ['stan_peptide_id', 'pep', 'mu_pred', 'mu', 'sigma_pred', 'exp_id']]
dfe_group = dfe.groupby('stan_peptide_id')
# Extract relevant values for each peptide
all_mu_preds = dfe_group['mu_pred'].apply(np.array)
all_mus = dfe_group['mu'].apply(np.array)
all_sigma_preds = dfe_group['sigma_pred'].apply(np.array)
all_peps = dfe_group['pep'].apply(np.array)
all_exp_ids = dfe_group['exp_id'].apply(np.array)
logger.info('Updating PEPs...')
for i, e in enumerate(np.sort(dfa['exp_id'].unique())):
# Timing debugging
time_init = 0
time_loo = 0
time_draw_laplace = 0
time_scale_laplace = 0
time_draw_norm_and_uniform = 0
time_scale_norm_and_uniform = 0
time_sampling_with_replacement = 0
time_medians = 0
time_dist_building = 0
time_bayes = 0
time_append = 0
_time = time.time()
exp_name = exp_names[i]
exp = dfa[dfa['exp_id'] == e]
exp = exp.reset_index(drop=True)
exp_peptides = exp['stan_peptide_id'].unique()
logger.info('Exp ({} / {}) - {} - ({} Peptides, {} PSMs)'.format(i + 1, num_experiments, exp_name, len(exp_peptides), exp.shape[0]))
time_init += (time.time() - _time)
# vector of P(RT|delta=1) for this experiment.
rt_plus = pd.Series(np.zeros(exp.shape[0]))
if bootstrap_method is not None:
_time = time.time()
# to avoid using this experiment's own data to update the confidence
# of its own observations, recalculate the reference RTs (mu) without the
# data from this experiment, by:
# 1) non-parametric bootstrapping over the median of the predicted mus.
# OR
# 2) parametric bootstrapping, using the RT distribution parameters
# Extract relevant values for each peptide
mu_preds = all_mu_preds[exp_peptides]
mus = all_mus[exp_peptides]
sigma_preds = all_sigma_preds[exp_peptides]
peps = all_peps[exp_peptides]
exp_ids = all_exp_ids[exp_peptides]
num_peptides = exp_ids.shape[0]
# Leave out this experiment's observations
leave_out = exp_ids.apply(lambda x: np.array(x == e))
obs_per_pep = exp_ids.apply(len) - leave_out.apply(sum)
def loo(x, y):
return x[~y]
mu_preds = mu_preds.combine(leave_out, loo)
mus = mus.combine(leave_out, loo)
sigma_preds = sigma_preds.combine(leave_out, loo)
peps = peps.combine(leave_out, loo)
exp_ids = exp_ids.combine(leave_out, loo)
# matrix of n by k estimated mus from the bootstrapping
# will iterate over in the loop after the immediate one
mu_k = np.zeros((num_peptides, bootstrap_iters))
time_loo += (time.time() - _time)
for j, stan_peptide_id in enumerate(exp_peptides):
num_obs = obs_per_pep[stan_peptide_id]
# Parametric bootstrap
if (
bootstrap_method == 'parametric' or
bootstrap_method == 'parametric_mixture' or
bootstrap_method == 'parametric-mixture'
):
# Draw num_obs * bootstrap_iters samples
_time = time.time()
pos_samples = laplace_pool[0:(num_obs * bootstrap_iters)].reshape(bootstrap_iters, num_obs)
time_draw_laplace += (time.time() - _time)
_time = time.time()
# Shift and scale sampled RTs by mu and sigma_pred, respectively
pos_samples = (pos_samples * sigma_preds[stan_peptide_id]) + mu_preds[stan_peptide_id]
time_scale_laplace += (time.time() - _time)
if (
bootstrap_method == 'parametric_mixture' or
bootstrap_method == 'parametric-mixture'
):
_time = time.time()
null_samples = null_sample_pool[0:(num_obs * bootstrap_iters)].reshape(bootstrap_iters, num_obs)
coin_flips = uniform_pool[0:(num_obs * bootstrap_iters)].reshape(bootstrap_iters, num_obs)
time_draw_norm_and_uniform += (time.time() - _time)
_time = time.time()
# Shift and scale sampled RTs by mean and std of null dists
null_samples = (null_samples * null_dists[exp_ids[stan_peptide_id], 1]) + null_dists[exp_ids[stan_peptide_id], 0]
fp = coin_flips < np.repeat([peps[stan_peptide_id],], bootstrap_iters, axis=0)
# Overwrite original samples with samples from null distribution
pos_samples[fp] = null_samples[fp]
time_scale_norm_and_uniform += (time.time() - _time)
# Non-parametric bootstrap
elif (
bootstrap_method == 'non-parametric' or
bootstrap_method == 'non_parametric'
):
# Pull random indices from the list of existing predicted mus
# To get random indices, just take N random variates from uniform_pool,
# (Otherwise used for coin flips in parametric bootstrap)
# and multiply by len, then floor, to get a list index
# This is just a cheap way to sample with replacement from the mu_preds
_time = time.time()
# Convert to a numpy array so we can do integer indexing
pos_samples = np.array(mu_preds[stan_peptide_id])[
np.floor(uniform_pool[0:(num_obs * bootstrap_iters)] * num_obs).astype(int)
]
# Reshape into matrix
pos_samples = pos_samples.reshape(bootstrap_iters, num_obs)
time_sampling_with_replacement += (time.time() - _time)
_time = time.time()
# Aggregate all sampled mus and store it in mu_k
if config['mu_estimation'] == 'median':
mu_k[j] = np.median(pos_samples, axis=1)
elif config['mu_estimation'] == 'mean':
mu_k[j] = np.mean(pos_samples, axis=1)
elif config['mu_estimation'] == 'weighted_mean':
# or take the weighted mean
weights = ((1 - np.array(peps[stan_peptide_id])) - (1 - config['pep_threshold'])) / config['pep_threshold']
mu_k[j] = (np.sum(pos_samples * weights, axis=1) / np.sum(weights))
time_medians += (time.time() - _time)
_time = time.time()
# map of stan_peptide_id onto 1:num_peptides
pep_inds = {ind: var for var, ind in enumerate(exp_peptides)}
pep_inds = exp['stan_peptide_id'].map(pep_inds)
# for each bootstrap iteration:
for k in range(0, bootstrap_iters):
# evaluate the transformed RTs (predicted mus) on distributions
# with the bootstrapped, estimated mus as the means.
#rt_plus = rt_plus + laplace.pdf(exp['retention_time'], \
# loc=model['ref_to_rt'](exp, mu_k[:,j][pep_inds], params), \
# scale=exp['sigmaij'])
rt_plus = rt_plus + laplace.pdf(exp['mu_pred'],
loc=mu_k[:, k][pep_inds],
scale=exp['sigma_pred']
)
# divide total likelihood by # of iterations to normalize to area of 1
rt_plus = rt_plus / bootstrap_iters
time_dist_building += (time.time() - _time)
else:
_time = time.time()
# not using bootstrap, but using adjusted mu as a point estimate
# for updating the confidence
rt_plus = model['rt_plus_func'](exp)
time_dist_building += (time.time() - _time)
_time = time.time()
# P(RT|delta=0)*P(delta=0)
# PEP.new = P(delta=0|RT) = ---------------------------------------------------
# P(RT|delta=0)*P(delta=0) + P(RT|delta=1)*P(delta=1)
#
# delta=1 = Correct ID (true positive)
# delta=0 = Incorrect (false positive)
# P(RT|delta=0) = probability of peptides RT, given that PSM is incorrect
# estimate empirical density of RTs over the experiment
rt_minus = model['rt_minus_func'](exp)
# P(delta=0) = probability that PSM is incorrect (PEP)
# P(delta=1) = probability that PSM is correct (1-PEP)
# P(RT|delta=1) = probability that given the correct ID, the RT falls in the
# normal distribution of RTs for that peptide, for that experiment
# delta=1 = Correct ID (true positive)
# delta=0 = Incorrect (false positive)
#
pep_new = (
(rt_minus * exp['pep']) /
((rt_minus * exp['pep']) + (rt_plus * (1.0 - exp['pep'])))
)
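        # Purely illustrative numbers (not from any real run): with pep = 0.1,
        # rt_minus = 0.01 and rt_plus = 0.5, the update gives
        # (0.01 * 0.1) / (0.01 * 0.1 + 0.5 * 0.9) ~= 0.0022, i.e. a PSM whose RT fits
        # its peptide's aligned distribution far better than the experiment-wide null
        # ends up with a much smaller error probability.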
time_bayes += (time.time() - _time)
_time = time.time()
# for PSMs for which we have alignment/update data
exp_new = pd.DataFrame({
'rt_minus': rt_minus.tolist(),
'rt_plus': rt_plus.tolist(),
'mu': exp['mu'].values.tolist(),
'muij': exp['muij'].values.tolist(),
'sigmaij': exp['sigmaij'].values.tolist(),
'pep_new': pep_new.tolist(),
'id': exp['id'].values,
'exp_id': exp['exp_id'].values,
'peptide_id': exp['peptide_id'].values,
'stan_peptide_id': exp['stan_peptide_id'].values,
'input_id': exp['input_id'].values,
'exclude': exp['exclude'].values
})
# append to master DataFrame and continue
df_new = df_new.append(exp_new)
time_append += (time.time() - _time)
logger.debug('time_init: {:.1f} ms'.format(time_init*1000))
logger.debug('time_loo (bootstrap only): {:.1f} ms'.format(time_loo*1000))
logger.debug('time_draw_laplace (parametric only): {:.1f} ms'.format(time_draw_laplace*1000))
logger.debug('time_scale_laplace (parametric only): {:.1f} ms'.format(time_scale_laplace*1000))
logger.debug('time_draw_norm_and_uniform (parametric-mixture only): {:.1f} ms'.format(time_draw_norm_and_uniform*1000))
logger.debug('time_scale_norm_and_uniform (parametric-mixture only): {:.1f} ms'.format(time_scale_norm_and_uniform*1000))
logger.debug('time_sampling_with_replacement (non-parametric only): {:.1f} ms'.format(time_sampling_with_replacement*1000))
logger.debug('time_medians (bootstrap only): {:.1f} ms'.format(time_medians*1000))
logger.debug('time_dist_building: {:.1f} ms'.format(time_dist_building*1000))
logger.debug('time_bayes: {:.1f} ms'.format(time_bayes*1000))
logger.debug('time_append: {:.1f} ms'.format(time_append*1000))
# reorder by ID and reset the index
df_new = df_new.sort_values('id')
df_new = df_new.reset_index(drop=True)
return df_new
def write_output(df, out_path, config):
# remove diagnostic columns, unless they are specified to be kept
if 'add_diagnostic_cols' not in config or config['add_diagnostic_cols'] == False:
df_out = df.drop([
'pep_new', 'participated', 'exclude', 'mu', 'muij',
'rt_minus', 'rt_plus', 'sigmaij', 'residual',
'input_id', 'exp_id', 'peptide_id', 'stan_peptide_id'
], axis=1)
# filter by PSM FDR?
if 'psm_fdr_threshold' in config and type(config['psm_fdr_threshold']) == float:
to_remove = (df_out['dart_qval'] > config['psm_fdr_threshold'])
logger.info('{}/{} ({:.2%}) PSMs removed at a threshold of {:.2%} FDR.'.format(np.sum(to_remove), df_out.shape[0], np.sum(to_remove) / df_out.shape[0], config['psm_fdr_threshold']))
df_out = df_out[~to_remove].reset_index(drop=True)
# filter by protein FDR?
if 'protein_fdr_threshold' in config and type(config['protein_fdr_threshold']) == float:
if 'razor_protein_fdr' in df_out.columns:
            to_remove = ((df_out['razor_protein_fdr'] > config['protein_fdr_threshold']) | pd.isnull(df_out['razor_protein_fdr']))
"""
Prepare sample split
Created on 04/10/2020
@author: RH
"""
import os
import pandas as pd
import numpy as np
def set_sep(path, cut=0.3):
trlist = []
telist = []
valist = []
    pos = pd.read_csv('../COVID-CT-MetaInfo.csv', header=0, usecols=['image', 'patient'])
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
from plotly.subplots import make_subplots
from pathlib import Path
repo_dir = Path(__file__).parent.parent
outputdir = repo_dir/'output'
outputdir.mkdir(parents=True, exist_ok=True)
casos = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto3/TotalesPorRegion_std.csv')
casos['Fecha'] = pd.to_datetime(casos['Fecha'])
casos_sintomaticos = casos[casos['Categoria']=='Casos nuevos con sintomas'].pivot(index='Fecha', columns='Region', values='Total')
casos_nuevos = casos[casos['Categoria']=='Casos nuevos totales'].pivot(index='Fecha', columns='Region', values='Total')
casos_activos_conf = casos[casos['Categoria']=='Casos activos confirmados'].pivot(index='Fecha', columns='Region', values='Total')
casos_activos_prob = casos[casos['Categoria']=='Casos activos probables'].pivot(index='Fecha', columns='Region', values='Total')
casos_nuevos_prob = casos[casos['Categoria']=='Casos probables acumulados'].pivot(index='Fecha', columns='Region', values='Total').diff()
casos_nuevos_antigeno = casos[casos['Categoria']=='Casos nuevos confirmados por antigeno'].pivot(index='Fecha', columns='Region', values='Total')
casos_sintomaticos.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos.rename(columns={'Total': 'Chile'}, inplace=True)
casos_activos_conf.rename(columns={'Total': 'Chile'}, inplace=True)
casos_activos_prob.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos_prob.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos_antigeno.rename(columns={'Total': 'Chile'}, inplace=True)
casos_nuevos_prob_antigeno = casos_nuevos.add(casos_nuevos_prob, fill_value=0)
casos_nuevos_prob_antigeno = casos_nuevos_prob_antigeno.add(casos_nuevos_antigeno, fill_value=0)
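# Note: DataFrame.add(..., fill_value=0) aligns the frames on their Fecha index and
# Region columns and treats a value missing from only one operand as 0, so regions
# absent from one source do not turn the combined total into NaN.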
datos_regiones = pd.read_csv('https://raw.githubusercontent.com/ivanMSC/COVID19_Chile/master/utils/regionesChile.csv')
casos_activos = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto46/activos_vs_recuperados.csv')
casos_activos.rename(columns={
'fecha_primeros_sintomas': 'Fecha',
'activos': 'Activos',
'recuperados': 'Recuperados'
}, inplace=True)
casos_activos['Fecha'] = pd.to_datetime(casos_activos['Fecha'])
casos_activos['Activos'] = pd.to_numeric(casos_activos['Activos'])
casos_activos['Recuperados'] = pd.to_numeric(casos_activos['Recuperados'])
casos_activos.set_index('Fecha', inplace=True)
casos_uci = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto8/UCI_T.csv')
casos_uci.rename(columns={'Region': 'Fecha'}, inplace=True)
datos_regiones = pd.merge(datos_regiones, casos_uci.iloc[[0,1]].T, left_on='numTradicional', right_on=0)
datos_regiones.drop(columns=0, inplace=True)
datos_regiones.rename(columns={1: 'Poblacion'}, inplace=True)
casos_uci = casos_uci.iloc[2:]
casos_uci['Fecha'] = pd.to_datetime(casos_uci['Fecha'])
casos_uci.set_index('Fecha', inplace=True)
casos_uci['Chile'] = casos_uci[list(casos_uci.columns)].sum(axis=1)
DP19 = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto19/CasosActivosPorComuna_std.csv')
activos_dp19 = DP19[DP19['Comuna']=='Total'].pivot(index='Fecha', columns='Codigo region', values='Casos activos').sum(axis=1)
activos_dp19.index = pd.to_datetime(activos_dp19.index)
activos_dp19
DP5 = pd.read_csv('https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/master/output/producto5/TotalesNacionales_T.csv')
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from six.moves import zip_longest
import copy
import re
from types import GeneratorType
from collections import Counter, defaultdict, Hashable
from unittest import TestCase, main
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import Sequence
from skbio.util import assert_data_frame_almost_equal
from skbio.sequence._sequence import (_single_index_to_slice, _is_single_index,
_as_slice_if_single_index)
class SequenceSubclass(Sequence):
"""Used for testing purposes."""
pass
class TestSequence(TestCase):
def setUp(self):
self.sequence_kinds = frozenset([
str, Sequence, lambda s: np.fromstring(s, dtype='|S1'),
lambda s: np.fromstring(s, dtype=np.uint8)])
def empty_generator():
            # return (rather than raise StopIteration) so this remains an empty
            # generator; under PEP 479 a StopIteration raised inside a generator
            # becomes a RuntimeError on modern Python
            return
            yield
self.getitem_empty_indices = [
[],
(),
{},
empty_generator(),
# ndarray of implicit float dtype
np.array([]),
np.array([], dtype=int)]
def test_init_default_parameters(self):
seq = Sequence('.ABC123xyz-')
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(11)))
def test_init_nondefault_parameters(self):
seq = Sequence('.ABC123xyz-',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(11)})
npt.assert_equal(seq.values, np.array('.ABC123xyz-', dtype='c'))
self.assertEqual('.ABC123xyz-', str(seq))
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'id': 'foo', 'description': 'bar baz'})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'quality': range(11)}, index=np.arange(11)))
def test_init_handles_missing_metadata_efficiently(self):
seq = Sequence('ACGT')
# metadata attributes should be None and not initialized to a "missing"
# representation
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
# initializing from an existing Sequence object should handle metadata
# attributes efficiently on both objects
new_seq = Sequence(seq)
self.assertIsNone(seq._metadata)
self.assertIsNone(seq._positional_metadata)
self.assertIsNone(new_seq._metadata)
self.assertIsNone(new_seq._positional_metadata)
self.assertFalse(seq.has_metadata())
self.assertFalse(seq.has_positional_metadata())
self.assertFalse(new_seq.has_metadata())
self.assertFalse(new_seq.has_positional_metadata())
def test_init_empty_sequence(self):
# Test constructing an empty sequence using each supported input type.
for s in (b'', # bytes
u'', # unicode
np.array('', dtype='c'), # char vector
np.fromstring('', dtype=np.uint8), # byte vec
Sequence('')): # another Sequence object
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (0, ))
npt.assert_equal(seq.values, np.array('', dtype='c'))
self.assertEqual(str(seq), '')
self.assertEqual(len(seq), 0)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
def test_init_single_character_sequence(self):
for s in (b'A',
u'A',
np.array('A', dtype='c'),
np.fromstring('A', dtype=np.uint8),
Sequence('A')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (1,))
npt.assert_equal(seq.values, np.array('A', dtype='c'))
self.assertEqual(str(seq), 'A')
self.assertEqual(len(seq), 1)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(1)))
def test_init_multiple_character_sequence(self):
for s in (b'.ABC\t123 xyz-',
u'.ABC\t123 xyz-',
np.array('.ABC\t123 xyz-', dtype='c'),
np.fromstring('.ABC\t123 xyz-', dtype=np.uint8),
Sequence('.ABC\t123 xyz-')):
seq = Sequence(s)
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
self.assertEqual(seq.values.shape, (14,))
npt.assert_equal(seq.values,
np.array('.ABC\t123 xyz-', dtype='c'))
self.assertEqual(str(seq), '.ABC\t123 xyz-')
self.assertEqual(len(seq), 14)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(14)))
def test_init_from_sequence_object(self):
# We're testing this in its simplest form in other tests. This test
# exercises more complicated cases of building a sequence from another
# sequence.
# just the sequence, no other metadata
seq = Sequence('ACGT')
self.assertEqual(Sequence(seq), seq)
# sequence with metadata should have everything propagated
seq = Sequence('ACGT',
metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(Sequence(seq), seq)
# should be able to override metadata
self.assertEqual(
Sequence(seq, metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}),
Sequence('ACGT', metadata={'id': 'abc', 'description': '123'},
positional_metadata={'quality': [42] * 4}))
# subclasses work too
seq = SequenceSubclass('ACGT',
metadata={'id': 'foo',
'description': 'bar baz'},
positional_metadata={'quality': range(4)})
self.assertEqual(
Sequence(seq),
Sequence('ACGT', metadata={'id': 'foo', 'description': 'bar baz'},
positional_metadata={'quality': range(4)}))
def test_init_from_contiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[:3]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('A*B'))
# we shouldn't own the memory because no copy should have been made
self.assertFalse(seq._owns_bytes)
# can't mutate view because it isn't writeable anymore
with self.assertRaises(ValueError):
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('A*B'))
# mutate bytes (*not* the view)
bytes[0] = 99
# Sequence changed because we are only able to make the view read-only,
# not its source (bytes). This is somewhat inconsistent behavior that
# is (to the best of our knowledge) outside our control.
self.assertEqual(seq, Sequence('c*B'))
def test_init_from_noncontiguous_sequence_bytes_view(self):
bytes = np.array([65, 42, 66, 42, 65], dtype=np.uint8)
view = bytes[::2]
seq = Sequence(view)
# sequence should be what we'd expect
self.assertEqual(seq, Sequence('ABA'))
# we should own the memory because a copy should have been made
self.assertTrue(seq._owns_bytes)
# mutate bytes and its view
bytes[0] = 99
view[1] = 100
# sequence shouldn't have changed
self.assertEqual(seq, Sequence('ABA'))
def test_init_no_copy_of_sequence(self):
bytes = np.array([65, 66, 65], dtype=np.uint8)
seq = Sequence(bytes)
# should share the same memory
self.assertIs(seq._bytes, bytes)
# shouldn't be able to mutate the Sequence object's internals by
# mutating the shared memory
with self.assertRaises(ValueError):
bytes[1] = 42
def test_init_empty_metadata(self):
for empty in None, {}:
seq = Sequence('', metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
def test_init_empty_metadata_key(self):
seq = Sequence('', metadata={'': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'': ''})
def test_init_empty_metadata_item(self):
seq = Sequence('', metadata={'foo': ''})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': ''})
def test_init_single_character_metadata_item(self):
seq = Sequence('', metadata={'foo': 'z'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': 'z'})
def test_init_multiple_character_metadata_item(self):
seq = Sequence('', metadata={'foo': '\nabc\tdef G123'})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, {'foo': '\nabc\tdef G123'})
def test_init_metadata_multiple_keys(self):
seq = Sequence('', metadata={'foo': 'abc', 42: {'nested': 'metadata'}})
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata,
{'foo': 'abc', 42: {'nested': 'metadata'}})
def test_init_empty_positional_metadata(self):
# empty seq with missing/empty positional metadata
for empty in None, {}, pd.DataFrame():
seq = Sequence('', positional_metadata=empty)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(0)))
# non-empty seq with missing positional metadata
seq = Sequence('xyz', positional_metadata=None)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_positional_metadata())
assert_data_frame_almost_equal(seq.positional_metadata,
pd.DataFrame(index=np.arange(3)))
def test_init_empty_positional_metadata_item(self):
for item in ([], (), np.array([])):
seq = Sequence('', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(0)))
def test_init_single_positional_metadata_item(self):
for item in ([2], (2, ), np.array([2])):
seq = Sequence('G', positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(1)))
def test_init_multiple_positional_metadata_item(self):
for item in ([0, 42, 42, 1, 0, 8, 100, 0, 0],
(0, 42, 42, 1, 0, 8, 100, 0, 0),
np.array([0, 42, 42, 1, 0, 8, 100, 0, 0])):
seq = Sequence('G' * 9, positional_metadata={'foo': item})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': item}, index=np.arange(9)))
def test_init_positional_metadata_multiple_columns(self):
seq = Sequence('^' * 5,
positional_metadata={'foo': np.arange(5),
'bar': np.arange(5)[::-1]})
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_positional_metadata_with_custom_index(self):
df = pd.DataFrame({'foo': np.arange(5), 'bar': np.arange(5)[::-1]},
index=['a', 'b', 'c', 'd', 'e'])
seq = Sequence('^' * 5, positional_metadata=df)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
self.assertTrue(seq.has_positional_metadata())
assert_data_frame_almost_equal(
seq.positional_metadata,
pd.DataFrame({'foo': np.arange(5),
'bar': np.arange(5)[::-1]}, index=np.arange(5)))
def test_init_invalid_sequence(self):
# invalid dtype (numpy.ndarray input)
with self.assertRaises(TypeError):
# int64
Sequence(np.array([1, 2, 3]))
with self.assertRaises(TypeError):
# |S21
Sequence(np.array([1, "23", 3]))
with self.assertRaises(TypeError):
# object
Sequence(np.array([1, {}, ()]))
# invalid input type (non-numpy.ndarray input)
with self.assertRaisesRegexp(TypeError, 'tuple'):
Sequence(('a', 'b', 'c'))
with self.assertRaisesRegexp(TypeError, 'list'):
Sequence(['a', 'b', 'c'])
with self.assertRaisesRegexp(TypeError, 'set'):
Sequence({'a', 'b', 'c'})
with self.assertRaisesRegexp(TypeError, 'dict'):
Sequence({'a': 42, 'b': 43, 'c': 44})
with self.assertRaisesRegexp(TypeError, 'int'):
Sequence(42)
with self.assertRaisesRegexp(TypeError, 'float'):
Sequence(4.2)
with self.assertRaisesRegexp(TypeError, 'int64'):
Sequence(np.int_(50))
with self.assertRaisesRegexp(TypeError, 'float64'):
Sequence(np.float_(50))
with self.assertRaisesRegexp(TypeError, 'Foo'):
class Foo(object):
pass
Sequence(Foo())
# out of ASCII range
with self.assertRaises(UnicodeEncodeError):
Sequence(u'abc\u1F30')
def test_init_invalid_metadata(self):
for md in (0, 'a', ('f', 'o', 'o'), np.array([]), pd.DataFrame()):
with self.assertRaisesRegexp(TypeError,
'metadata must be a dict'):
Sequence('abc', metadata=md)
def test_init_invalid_positional_metadata(self):
# not consumable by Pandas
with self.assertRaisesRegexp(TypeError,
'Positional metadata invalid. Must be '
'consumable by pd.DataFrame. '
'Original pandas error message: '):
Sequence('ACGT', positional_metadata=2)
# 0 elements
with self.assertRaisesRegexp(ValueError, '\(0\).*\(4\)'):
Sequence('ACGT', positional_metadata=[])
# not enough elements
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4])
# too many elements
with self.assertRaisesRegexp(ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=[2, 3, 4, 5, 6])
# Series not enough rows
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(3)))
# Series too many rows
with self.assertRaisesRegexp(ValueError, '\(5\).*\(4\)'):
Sequence('ACGT', positional_metadata=pd.Series(range(5)))
# DataFrame not enough rows
with self.assertRaisesRegexp(ValueError, '\(3\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(3)}))
# DataFrame too many rows
with self.assertRaisesRegexp(ValueError, '\(5\).*\(4\)'):
Sequence('ACGT',
positional_metadata=pd.DataFrame({'quality': range(5)}))
def test_values_property(self):
# Property tests are only concerned with testing the interface
# provided by the property: that it can be accessed, can't be
# reassigned or mutated in place, and that the correct type is
# returned. More extensive testing of border cases (e.g., different
# sequence lengths or input types, odd characters, etc.) are performed
# in Sequence.__init__ tests.
seq = Sequence('ACGT')
# should get back a numpy.ndarray of '|S1' dtype
self.assertIsInstance(seq.values, np.ndarray)
self.assertEqual(seq.values.dtype, '|S1')
npt.assert_equal(seq.values, np.array('ACGT', dtype='c'))
# test that we can't mutate the property
with self.assertRaises(ValueError):
seq.values[1] = 'A'
# test that we can't set the property
with self.assertRaises(AttributeError):
seq.values = np.array("GGGG", dtype='c')
def test_metadata_property_getter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertIsInstance(seq.metadata, dict)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# update existing key
seq.metadata['foo'] = 'baz'
self.assertEqual(seq.metadata, {'foo': 'baz'})
# add new key
seq.metadata['foo2'] = 'bar2'
self.assertEqual(seq.metadata, {'foo': 'baz', 'foo2': 'bar2'})
def test_metadata_property_getter_missing(self):
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
self.assertEqual(seq.metadata, {})
self.assertIsNotNone(seq._metadata)
def test_metadata_property_setter(self):
md = {'foo': 'bar'}
seq = Sequence('', metadata=md)
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
new_md = {'bar': 'baz', 42: 42}
seq.metadata = new_md
self.assertEqual(seq.metadata, new_md)
self.assertIsNot(seq.metadata, new_md)
seq.metadata = {}
self.assertEqual(seq.metadata, {})
self.assertFalse(seq.has_metadata())
def test_metadata_property_setter_invalid_type(self):
seq = Sequence('abc', metadata={123: 456})
for md in (None, 0, 'a', ('f', 'o', 'o'), np.array([]),
pd.DataFrame()):
with self.assertRaisesRegexp(TypeError,
'metadata must be a dict'):
seq.metadata = md
# object should still be usable and its original metadata shouldn't
# have changed
self.assertEqual(seq.metadata, {123: 456})
def test_metadata_property_deleter(self):
md = {'foo': 'bar'}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting again
del seq.metadata
self.assertIsNone(seq._metadata)
self.assertFalse(seq.has_metadata())
self.assertEqual(seq.metadata, {})
# test deleting missing metadata immediately after instantiation
seq = Sequence('ACGT')
self.assertIsNone(seq._metadata)
del seq.metadata
self.assertIsNone(seq._metadata)
def test_metadata_property_shallow_copy(self):
md = {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]}
seq = Sequence('CAT', metadata=md)
self.assertTrue(seq.has_metadata())
self.assertEqual(seq.metadata, md)
self.assertIsNot(seq.metadata, md)
# updates to keys
seq.metadata['key1'] = 'new val'
self.assertEqual(seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2]})
# original metadata untouched
self.assertEqual(md, {'key1': 'val1', 'key2': 'val2', 'key3': [1, 2]})
# updates to mutable value (by reference)
seq.metadata['key3'].append(3)
self.assertEqual(
seq.metadata,
{'key1': 'new val', 'key2': 'val2', 'key3': [1, 2, 3]})
# original metadata changed because we didn't deep copy
self.assertEqual(
md,
{'key1': 'val1', 'key2': 'val2', 'key3': [1, 2, 3]})
def test_positional_metadata_property_getter(self):
md = pd.DataFrame({'foo': [22, 22, 0]})
seq = Sequence('ACA', positional_metadata=md)
assert_data_frame_almost_equal(seq.positional_metadata,
                                       pd.DataFrame({'foo': [22, 22, 0]}))
import random
import os
import pandas as pd
from datetime import timedelta
from logbook import TestHandler
from pandas.util.testing import assert_frame_equal
from catalyst import get_calendar
from catalyst.exchange.exchange_asset_finder import ExchangeAssetFinder
from catalyst.exchange.exchange_data_portal import DataPortalExchangeBacktest
from catalyst.exchange.utils.exchange_utils import get_candles_df
from catalyst.exchange.utils.factory import get_exchange
from catalyst.exchange.utils.test_utils import output_df, \
select_random_assets
from catalyst.exchange.utils.stats_utils import set_print_settings
pd.set_option('display.expand_frame_repr', False)
pd.set_option('precision', 8)
import numpy as np
import pandas as pd
a = np.arange(4)
print(a)
# [0 1 2 3]
s = pd.Series(a)
print(s)
# 0 0
# 1 1
# 2 2
# 3 3
# dtype: int64
index = ['A', 'B', 'C', 'D']
name = 'sample'
s = pd.Series(data=a, index=index, name=name, dtype='float')
print(s)
# A 0.0
# B 1.0
# C 2.0
# D 3.0
# Name: sample, dtype: float64
a = np.arange(12).reshape((4, 3))
print(a)
# [[ 0 1 2]
# [ 3 4 5]
# [ 6 7 8]
# [ 9 10 11]]
# s = pd.Series(a)
# print(s)
# Exception: Data must be 1-dimensional
s = pd.Series(a[2])
print(s)
# 0 6
# 1 7
# 2 8
# dtype: int64
s = pd.Series(a.T[2])
print(s)
# 0 2
# 1 5
# 2 8
# 3 11
# dtype: int64
a = np.arange(12).reshape((4, 3))
print(a)
# [[ 0 1 2]
# [ 3 4 5]
# [ 6 7 8]
# [ 9 10 11]]
df = pd.DataFrame(a)
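# A rough sketch of the expected output for the 4x3 array above (default integer
# row and column labels; exact spacing can vary slightly between pandas versions):
print(df)
#    0   1   2
# 0  0   1   2
# 1  3   4   5
# 2  6   7   8
# 3  9  10  11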
import numpy as np
import pytest
import pandas as pd
from pandas.core.sparse.api import SparseDtype
@pytest.mark.parametrize("dtype, fill_value", [
('int', 0),
('float', np.nan),
('bool', False),
('object', np.nan),
('datetime64[ns]', pd.NaT),
('timedelta64[ns]', pd.NaT),
])
def test_inferred_dtype(dtype, fill_value):
sparse_dtype = SparseDtype(dtype)
result = sparse_dtype.fill_value
if pd.isna(fill_value):
assert pd.isna(result) and type(result) == type(fill_value)
else:
assert result == fill_value
def test_from_sparse_dtype():
dtype = SparseDtype('float', 0)
result = SparseDtype(dtype)
assert result.fill_value == 0
def test_from_sparse_dtype_fill_value():
dtype = SparseDtype('int', 1)
result = SparseDtype(dtype, fill_value=2)
expected = SparseDtype('int', 2)
assert result == expected
@pytest.mark.parametrize('dtype, fill_value', [
('int', None),
('float', None),
('bool', None),
('object', None),
('datetime64[ns]', None),
('timedelta64[ns]', None),
('int', np.nan),
('float', 0),
])
def test_equal(dtype, fill_value):
a = SparseDtype(dtype, fill_value)
b = SparseDtype(dtype, fill_value)
assert a == b
assert b == a
def test_nans_equal():
a = SparseDtype(float, float('nan'))
b = SparseDtype(float, np.nan)
assert a == b
assert b == a
@pytest.mark.parametrize('a, b', [
(SparseDtype('float64'), SparseDtype('float32')),
(SparseDtype('float64'), SparseDtype('float64', 0)),
(SparseDtype('float64'), SparseDtype('datetime64[ns]', np.nan)),
(SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)),
(SparseDtype('float64'), np.dtype('float64')),
])
def test_not_equal(a, b):
assert a != b
def test_construct_from_string_raises():
with pytest.raises(TypeError):
SparseDtype.construct_from_string('not a dtype')
@pytest.mark.parametrize("dtype, expected", [
(SparseDtype(int), True),
(SparseDtype(float), True),
(SparseDtype(bool), True),
(SparseDtype(object), False),
(SparseDtype(str), False),
])
def test_is_numeric(dtype, expected):
assert dtype._is_numeric is expected
def test_str_uses_object():
result = SparseDtype(str).subtype
assert result == np.dtype('object')
@pytest.mark.parametrize("string, expected", [
('Sparse[float64]', SparseDtype(np.dtype('float64'))),
('Sparse[float32]', SparseDtype(np.dtype('float32'))),
('Sparse[int]', SparseDtype(np.dtype('int'))),
('Sparse[str]', SparseDtype(np.dtype('str'))),
('Sparse[datetime64[ns]]', SparseDtype(np.dtype('datetime64[ns]'))),
("Sparse", SparseDtype(np.dtype("float"), np.nan))
])
def test_construct_from_string(string, expected):
result = SparseDtype.construct_from_string(string)
assert result == expected
@pytest.mark.parametrize("a, b, expected", [
(SparseDtype(float, 0.0), SparseDtype(np.dtype('float'), 0.0), True),
(SparseDtype(int, 0), SparseDtype(int, 0), True),
(SparseDtype(float, float('nan')), SparseDtype(float, np.nan), True),
(SparseDtype(float, 0), SparseDtype(float, np.nan), False),
    (SparseDtype(int, 0.0)
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import Series, Timestamp
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize("val,expected", [
(2**63 - 1, 3),
(2**63, 4),
])
def test_loc_uint64(val, expected):
# see gh-19399
s = Series({2**63 - 1: 3, 2**63: 4})
assert s.loc[val] == expected
def test_loc_getitem(test_data):
inds = test_data.series.index[[3, 4, 7]]
assert_series_equal(
test_data.series.loc[inds],
test_data.series.reindex(inds))
assert_series_equal(test_data.series.iloc[5::2], test_data.series[5::2])
# slice with indices
d1, d2 = test_data.ts.index[[5, 15]]
result = test_data.ts.loc[d1:d2]
expected = test_data.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = test_data.series > test_data.series.median()
assert_series_equal(test_data.series.loc[mask], test_data.series[mask])
# ask for index value
assert test_data.ts.loc[d1] == test_data.ts[d1]
assert test_data.ts.loc[d2] == test_data.ts[d2]
def test_loc_getitem_not_monotonic(test_data):
d1, d2 = test_data.ts.index[[5, 15]]
ts2 = test_data.ts[::2][[1, 2, 0]]
msg = r"Timestamp\('2000-01-10 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2]
with pytest.raises(KeyError, match=msg):
ts2.loc[d1:d2] = 0
def test_loc_getitem_setitem_integer_slice_keyerrors():
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
with pytest.raises(KeyError, match=r"^3L?$"):
s2.loc[3:11]
with pytest.raises(KeyError, match=r"^3L?$"):
s2.loc[3:11] = 0
def test_loc_getitem_iterator(test_data):
idx = iter(test_data.series.index[:10])
result = test_data.series.loc[idx]
assert_series_equal(result, test_data.series[:10])
def test_loc_setitem_boolean(test_data):
mask = test_data.series > test_data.series.median()
result = test_data.series.copy()
result.loc[mask] = 0
expected = test_data.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_loc_setitem_corner(test_data):
inds = list(test_data.series.index[[5, 8, 12]])
test_data.series.loc[inds] = 5
msg = r"\['foo'\] not in index"
with pytest.raises(KeyError, match=msg):
test_data.series.loc[inds + ['foo']] = 5
def test_basic_setitem_with_labels(test_data):
indices = test_data.ts.index[[5, 10, 15]]
cp = test_data.ts.copy()
exp = test_data.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = test_data.ts.copy()
exp = test_data.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.loc[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.loc[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
msg = r"\[5\] not contained in the index"
with pytest.raises(ValueError, match=msg):
s[inds_notfound] = 0
with pytest.raises(Exception, match=msg):
s[arr_inds_notfound] = 0
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
    expected = Timestamp('2011-01-03', tz='US/Eastern')
#!/usr/bin/env python
import time
import math
import re
import pandas as pd
from pathlib import Path
import numpy as np
import subprocess
from difflib import unified_diff, Differ
from mirge.libs.miRgeEssential import UID
from mirge.libs.bamFmt import sam_header, bow2bam, createBAM
from mirge.libs.mirge2_tRF_a2i import trna_deliverables, a2i_editing
import os, sys
from mirge.classes.exportHTML import FormatJS
"""
THIS SCRIPT RELIES HEAVILY ON PANDAS FUNCTIONS TO DERIVE THE SUMMARY (EXCEPT FOR THE GFF FUNCTION).
IF YOU ARE A DEVELOPER AND WANT TO UNDERSTAND THIS SCRIPT, I RECOMMEND BEING THOROUGH WITH pandas FUNCTIONS FIRST.
THIS SCRIPT RETURNS THE FOLLOWING FILES AS OUTPUT:
miR.Counts
miR.RPM
annotation.report
"""
def mirge_can(can, iso, df, ca_thr, file_name):
"""
    THIS FUNCTION TAKES Exact miRNA & isomiRs FOR EACH SAMPLE AND CALCULATES THE CANONICAL RATIO
"""
    # MERGING TAKES PLACE FOR ALL THE ELEMENTS OF exact miRNA, LEAVING BEHIND isomiRs WHICH ARE NOT IN exact miRNA
merged_left = pd.merge(left=can,right=iso, how='left', left_on='exact miRNA', right_on='isomiR miRNA')
merged_left = merged_left.fillna(0)
# IN PANDAS, IF THE COLUMN NAME IS SAME, IT WILL REPLACE THE NAME WITH _x AND _y
file_nameX = str(file_name)+"_x"
file_nameY = str(file_name)+"_y"
merged_left[file_nameY] = merged_left[file_nameY].astype(int)
merged_left.loc[merged_left[file_nameX] < 2, [file_nameX]] = 0 # REPLACE THE exact miRNA COUNT WITH ZERO IF exact miRNA < 2
merged_left.loc[merged_left[file_nameX] < 2, [file_nameY]] = 0 # REPLACE THE isomiR COUNT WITH ZERO IF exact miRNA < 2
merged_left.loc[merged_left[file_nameY] > 0, 'ratio'] = merged_left[file_nameX]/merged_left[file_nameY] #CREATES A COLUMN CALLED RATIO IF DENOMINATOR IS NOT ZERO
merged_left.loc[merged_left[file_nameY] == 0, 'ratio'] = merged_left[file_nameX] #ASSIGNS exact miRNA COUNTS AS RATIO IF DENOMINATOR IS ZERO
cols = [file_nameX, file_nameY]
merged_left[file_name] = merged_left.loc[merged_left['ratio'] > ca_thr, cols].sum(axis=1)
merged_left[file_name].fillna(0, inplace=True)
df = df.join(merged_left[file_name], how='outer')
del merged_left
return df
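# A purely hypothetical sketch of how mirge_can could be driven per sample; the
# caller, the DataFrame names (exact_counts, isomir_counts, summary) and the
# ca_thr value below are illustrative assumptions, not miRge3's actual code.
#
#   summary = pd.DataFrame({'exact miRNA': all_mirna_names})
#   for sample in base_names:                          # e.g. ['SRR001', 'SRR002']
#       can = exact_counts[['exact miRNA', sample]]    # reference miRNA counts
#       iso = isomir_counts[['isomiR miRNA', sample]]  # isomiR counts
#       summary = mirge_can(can, iso, summary, ca_thr=0.5, file_name=sample)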
def create_gff(args, pre_mirDict, mirDict, d, filenamegff, cannonical, isomirs, base_names, ref_db, annotation_lib, workDir, mirRPM_completeSet):
cols1 = ["Sequence","exact miRNA"] + base_names
cols2 = ["Sequence","isomiR miRNA"] + base_names
canonical_gff = pd.DataFrame(cannonical, columns= cols1).values.tolist() # Gives list of list containg Sequence, miRNA name, expression values for the samples - ref miRNA
isomir_gff = pd.DataFrame(isomirs, columns= cols2).values.tolist() # Gives list of list containg Sequence, miRNA name, expression values for the samples - isomiR
#can_gff_df = pd.DataFrame(cannonical, columns= cols1) # Gives list of list containg Sequence, miRNA name, expression values for the samples - ref miRNA
#iso_gff_df = pd.DataFrame(isomirs, columns= cols2) # Gives list of list containg Sequence, miRNA name, expression values for the samples - isomiR
#canonical_gff = can_gff_df.values.tolist()
#isomir_gff = iso_gff_df.values.tolist()
canonical_gff.extend(isomir_gff) # APPENDING THE LIST OF ISOMIRS TO CANONICAL # Making one big list to get coordinates and anntations for GFF3 format of miRTop
gffwrite = open(filenamegff, "w+") # creating file to write the gff output # sample_miRge3.gff
gffwrite.write("# GFF3 adapted for miRNA sequencing data\n")
gffwrite.write("## VERSION 0.0.1\n")
version_db = "miRBase22" if ref_db == "miRBase" else "MirGeneDB2.0"
gffwrite.write("## source-ontology: " + version_db + "\n")
gffwrite.write("## COLDATA: "+ ",".join(str(nm) for nm in base_names) + "\n")
#forJSgffData = open(str(Path(workDir)/"gff_data.csv"), "w+")
JS_hmap_iso_miRcounts = dict()
JS_hmap_iso_miRVar = dict()
JS_hmap_list_ref = dict()
JS_filenames = ",".join(str(nm) for nm in base_names)
#JS_headername = 'Seq,miR,var,'+JS_filenames+',Total\n'
#forJSgffData.write(JS_headername)
# GFF3 adapted for miRNA sequencing data")
start=0
end=0
parent="-"
filter_var = "Pass"
strand = "+"
dots = "."
precursorSeq = uid_val = ""
"""
READING ANNOTATION DATA TO GET GENOMIC COORDINATES AND PRECURSOR miRNA
"""
if args.bam_out:
header = sam_header(args)
for names in base_names:
file_sam_nameH = str(names) +".sam"
sam_nameH = Path(workDir)/file_sam_nameH
with open(sam_nameH,"w+") as samH:
#samH.write("@HD\tVN:3.0\tSO:coordinate\n")
samH.write(header)
pre_cur_name={}
pre_strand={}
mature_chromosome={}
mature_cor_start={}
mature_cor_end={}
mature_strand = {}
with open(annotation_lib) as alib: # Reading annotations GTF from miRBase or miRGeneDB based on user and gather coordinates and name and sequence of the precursor miRNA
for annlib in alib:
annlib = annlib.strip()
annlib_list = annlib.split("\t")
try:
if ref_db == "MirGeneDB":
if annlib_list[2] == "pre_miRNA":
pre_name = annlib_list[8].split(";")[0]
pre_name = pre_name.replace("ID=","")
pre_strand[pre_name] = annlib_list[6]
#print(pre_name)
else:
mature_name = annlib_list[8].split(";")[0]
mature_name = mature_name.replace("ID=","")
#print(mature_name)
if mature_name not in pre_cur_name:
pre_cur_name[mature_name] = pre_name
mature_chromosome[mature_name] = annlib_list[0]
mature_cor_start[mature_name] = annlib_list[3]
mature_cor_end[mature_name] = annlib_list[4]
mature_strand[mature_name] = annlib_list[6] # Genomic strand
else:
if annlib_list[2] == "miRNA_primary_transcript":
pre_name = annlib_list[8].split(";")[-1]
pre_name = pre_name.replace("Name=","")
pre_strand[pre_name] = annlib_list[6]
else:
mature_name = annlib_list[8].split(";")[2]
mature_name = mature_name.replace("Name=","")
if mature_name not in pre_cur_name:
pre_cur_name[mature_name] = pre_name
mature_chromosome[mature_name] = annlib_list[0] # Chromosome location
mature_cor_start[mature_name] = annlib_list[3] # Genomic coordinates and not miRNA seq to precursor sequence
mature_cor_end[mature_name] = annlib_list[4] # Genomic coordinates of miRNA and not its position w.r.t precursor sequence
mature_strand[mature_name] = annlib_list[6] # Genomic strand
except IndexError:
pass
#print(pre_cur_name)
bam_can_dict={}
bam_expression_dict={}
JS_variantType_dataDict = dict()
for cans in canonical_gff:
gen_start=0
gen_end=0
seq_m = cans[0] # Sequence from datasest/pandas
if "." in cans[1]:
seq_master = cans[1].split(".")[0] # miRNA name with .SNP extension
else:
seq_master = cans[1] # miRNA name
canonical_expression = ','.join(str(x) for x in cans[2:]) # Expression/collapsed counts for each sample - joining by ','
JS_exprn_total = str(sum(cans[2:]))
bam_expression_dict[seq_m] = [int(x) for x in cans[2:]]
new_string=""
#print(seq_master)
try:
if seq_master in pre_cur_name:
master_seq = mirDict[seq_master] # Fetch mature miRNA sequence
req_precursor_name = pre_cur_name[seq_master] # Fetch name of the corresponding precursor miRNA name
gen_chr = mature_chromosome[seq_master]
gen_start = int(mature_cor_start[seq_master])
gen_end = int(mature_cor_end[seq_master])
gen_strand = mature_strand[seq_master]
#print(master_seq, req_precursor_name, gen_chr, gen_start, gen_end, gen_strand)
else:
seq_master = seq_master.replace("-3p","")
seq_master = seq_master.replace("-5p","")
seq_master = seq_master.replace("-3p*","")
seq_master = seq_master.replace("-5p*","")
#print(seq_master)
master_seq = mirDict[seq_master] # Fetch mature miRNA sequence
req_precursor_name = pre_cur_name[seq_master] # Fetch name of the corresponding precursor miRNA name
#print(req_precursor_name)
gen_chr = mature_chromosome[seq_master]
#print(gen_chr)
gen_start = int(mature_cor_start[seq_master])
gen_end = int(mature_cor_end[seq_master])
gen_strand = mature_strand[seq_master]
#print(gen_start, gen_end, gen_strand)
#print(master_seq, req_precursor_name, gen_chr, gen_start, gen_end, gen_strand)
#print(req_precursor_name)
precursorSeq = pre_mirDict[req_precursor_name] # # Fetch sequence of the corresponding precursor miRNA
#print(precursorSeq)
if precursorSeq != "":
start = precursorSeq.find(master_seq) + 1 # This is to calculate coordinates of the mature miRNA seq wrt precursor
end = start + len(master_seq) - 1
else:
start = 1 # Well, if the coordinates are not availble, I will put them as 1 to length of seq. Although this case is rare to none
end = start + len(master_seq) - 1
#print()
#print(precursorSeq+"\n"+master_seq+"\n")
#print("2: "+ seq_m + " "+ str(start)+ " " + str(gen_start))
if seq_m == master_seq: # If mature miRNA sequence is same as query FASTQ sequence, then it is reference miRNA
type_rna = "ref_miRNA"
if "N" not in seq_m:
uid_val = UID(seq_m, "ref") # It is the Unique identifier, this function is present miRgeEssential.py
else:
uid_val = "." # If the sequence contains ambiguous bases (N), then UID is replaced by dot (.)
cigar = str(len(seq_m))+"M" # CIGAR format for ref_miRNA is complete match, i.e., length of miRNA and M for example 25M (meaning 25 bases match)
# Finally to write GFF output for ref_miRNA
mi_var = seq_master+"\t"+version_db+"\t"+type_rna+"\t"+str(start)+"\t"+str(end)+"\t.\t+\t.\tRead="+seq_m+"; UID="+uid_val+"; Name="+ seq_master +"; Parent="+req_precursor_name+"; Variant=NA; Cigar="+cigar+"; Expression="+canonical_expression +"; Filter=Pass; Hits="+ canonical_expression + "\n"
#print(mi_var)
gffwrite.write(mi_var)
#JS_hmap_list_ref.append([seq_master, "ref", canonical_expression])
#JS_hmap_list_ref.append([seq_master, "ref"] + canonical_expression.split(","))
try:
JS_hmap_list_ref[seq_master].append(canonical_expression.split(","))
except KeyError:
JS_hmap_list_ref[seq_master] = canonical_expression.split(",")
#forJSgffData.write(str(seq_m)+","+seq_master+",ref,"+str(canonical_expression)+","+JS_exprn_total+"\n")
bam_can_dict[seq_m] = str(gen_chr) +"\t"+ str(gen_start) +"\t"+ cigar + "\t"+ gen_strand
#bow2bam
"""
FOR SAM/BAM FILE FORMAT: gen_chr, gen_start, gen_end
"""
else: # If mature miRNA sequence is same as query FASTQ sequence, then it is reference miRNA else it is an isomiR
type_rna = "isomiR"
if "N" not in seq_m:
uid_val = UID(seq_m, "iso")
else:
uid_val = "."
result = list(d.compare(master_seq, seq_m)) # Python function difflib - Differ to detect changes between two strings
re_len = len(master_seq)
variant=""
master_variant = []
#print()
#print("--**START**--")
master_seq_bc = list(master_seq)
result_seq_bc = result
for mdx, mbases in enumerate(master_seq_bc):
if result_seq_bc[mdx].startswith("-"):
pass # Thought of something and left it as placeholder
elif result_seq_bc[mdx].startswith("+"):
master_seq_bc.insert(mdx,'-') # Inserting '-' in the reference sequence if there is a change is base or insertion in the sequence
result_seq_bc = [bc.replace(" ", "") for bc in result_seq_bc if not bc.startswith("-")] # Cleaning the results and removing extra spaces obtained from difflib - Differ
#print("--**MID**--")
sub=[]
for idx, bases in enumerate(result): # Creating an arrays if the reference now starts with '-', append "_" at that index position, etc and making two arrays of same lenght with variations
if bases.startswith("-"):
sub.append("_")
elif bases.startswith("+"):
sub.append(bases.replace(" ", ""))
else:
sub.append(bases.replace(" ",""))
diff = len(sub) - len(master_seq_bc)
for x in range(diff):
master_seq_bc.append("-")
# print(master_seq_bc)
# print(sub)
for yidx, ys in enumerate(master_seq_bc): # <FORWARD> For upto 2 bases, find variants/base changes as shown in below comment A>T,T>G,T>G and basically delete '-' and "_" from two arrays
if yidx > 0:
try:
if ys == "-" and sub[yidx-1] == "_":
if yidx-2 > 0 and sub[yidx-2] == "_" and master_seq_bc[yidx+1] == "-":
del master_seq_bc[yidx:yidx+2]
del sub[yidx-2:yidx]
#['A', 'A', 'A', 'C', 'C', 'G', 'T', 'T', 'A', '-', 'C', 'C', 'A', 'T', 'T', 'A', 'C', 'T', 'G', 'A', 'G', 'T', 'T', '-', '-']
#['A', 'A', 'A', 'C', 'C', 'G', 'T', 'T', '_', '+T', 'C', 'C', 'A', 'T', 'T', 'A', 'C', 'T', 'G', '_', 'G', '_', '_', '+G', '+G']
else:
temp_1 = master_seq_bc.pop(yidx)
temp_2 = sub.pop(yidx-1)
#AAACCGTTTCCATTACTGGGG - This is the output of the loop
#['A', 'A', 'A', 'C', 'C', 'G', 'T', 'T', 'A', 'C', 'C', 'A', 'T', 'T', 'A', 'C', 'T', 'G', 'A', 'G', 'T', 'T']
#['A', 'A', 'A', 'C', 'C', 'G', 'T', 'T', '+T', 'C', 'C', 'A', 'T', 'T', 'A', 'C', 'T', 'G', '_', 'G', '+G', '+G']
except IndexError:
pass
for yidx, ys in enumerate(master_seq_bc): # <REVERSE> For upto 2 bases, find variants/base changes as shown in below comment C>T,A>_ and basically delete '-' and "_" from two arrays
if yidx > 0:
try:
if ys == "-" and sub[yidx+1] == "_":
if yidx+2 <= len(sub) and sub[yidx+2] == "_" and master_seq_bc[yidx+1] == "-":
del master_seq_bc[yidx:yidx+2]
del sub[yidx:yidx+2]
#['-', 'A', 'A', 'C', 'G', 'G', 'C', 'A', 'A', 'T', 'G', 'A', 'C', 'T', 'T', 'T', 'T', 'G', 'T', 'A', 'C', '-', 'C', 'A']
#['+A', 'A', 'A', 'C', 'G', 'G', 'C', 'A', 'A', 'T', 'G', 'A', 'C', 'T', 'T', 'T', 'T', 'G', 'T', 'A', 'C', '+T', '_', '_']
else:
temp_1 = master_seq_bc.pop(yidx)
temp_2 = sub.pop(yidx+1)
# hsa-miR-548al AACGGCAATGACTTTTGTACCA AAACGGCAATGACTTTTGTACT
#['-', 'A', 'A', 'C', 'G', 'G', 'C', 'A', 'A', 'T', 'G', 'A', 'C', 'T', 'T', 'T', 'T', 'G', 'T', 'A', 'C', 'C', 'A']
#['+A', 'A', 'A', 'C', 'G', 'G', 'C', 'A', 'A', 'T', 'G', 'A', 'C', 'T', 'T', 'T', 'T', 'G', 'T', 'A', 'C', '+T', '_']
except IndexError:
pass
#print(master_seq_bc)
#print(sub)
"""
['T', 'T', 'T', 'T', 'T', 'C', 'A', 'T', 'T', 'A', 'T', 'T', 'G', 'C', '-', 'T', 'C', 'C', 'T', 'G', 'A', 'C', '-', 'C'] => "-" in this line means insertion (Ref)
['T', 'T', 'T', 'T', 'T', 'C', 'A', 'T', 'T', 'A', 'T', 'T', 'G', '_', '+G', 'T', 'C', 'C', 'T', 'G', '_', 'C', '+T', 'C'] => "_" in this line means deletion (Query)
hsa-miR-335-3p TTTTTCATTATTGCTCCTGACC TTTTTCATTATTGGTCCTGCTC
"""
#print(seq_master +"\t"+ master_seq +"\t"+ seq_m +" "+ new_string+"\t"+variant)
iso_add={} # Insertions
iso_del={} # Deletions
iso_sub={} # Substitutions
iso_5p_add=iso_5p_del=""
iso_3p_add=iso_3p_del=""
#print("2"+ seq_m + " "+ str(start)+ " " + str(gen_start))
for pidx, v in enumerate(master_seq_bc):
if v == "-": # Insertions
iso_add[pidx] = sub[pidx]
elif sub[pidx] == "_": # Deletions
iso_del[pidx] = v
elif v != sub[pidx]: # Substitutions
iso_sub[pidx] = sub[pidx]
limit_master = len(master_seq_bc)
## Loop to detect 5p changes ##
for i in range(limit_master):
if i in iso_add:
iso_5p_add += iso_add[i]
elif i in iso_del:
iso_5p_del += iso_del[i]
else:
break
## Loop to detect 3p changes ##
for i in range(limit_master, -1, -1):
if i-1 in iso_add:
iso_3p_add += iso_add[i-1]
elif i-1 in iso_del:
iso_3p_del += iso_del[i-1]
else:
break
## Loop to detect internal changes ##
## Find and trim all the 5p and 3p changes to retain only the internal variants ##
#cigar5padd = cigar5pdel = len3padd = cigar3pdel ="" # variant=iso_3p:-1; Cigar=21M;
variant = ""
#print(precursorSeq)
#print(precursorSeq[start-1:end])
#print(seq_m)
if iso_5p_add != "":
a5p = iso_5p_add
a5p = a5p.replace("+","")
len5padd = len(a5p)
pre_5pAdd = list(precursorSeq[start-len5padd-1:start-1])
try:
template5p=non_template5p=0
for e5pidx, each5ps in enumerate(a5p):
if each5ps == pre_5pAdd[e5pidx]:
template5p += 1
else:
non_template5p += 1
if template5p != 0:
variant+= "iso_5p:+"+str(template5p)+","
if non_template5p != 0:
variant+= "iso_add5p:+"+str(non_template5p)+","
except IndexError:
variant+= "iso_5p:-"+str(len5padd)+","
start = start - len5padd
if gen_strand != "-":
#gen_start = gen_start + len5pdel
gen_start = gen_start - len5padd
# If the addition is a SNV w.r.t precursor, then it will be => iso_add5p:N. Number of non-template nucleotides added at 3p.
#del sub[0:len5padd]
#del master_seq_bc[0:len5padd]
#print("5p_add:" + a5p + "Len"+ str(len5padd))
#cigar5padd = str(len5padd)+"I"
if iso_5p_del != "":
d5p = iso_5p_del
len5pdel = len(d5p)
variant+= "iso_5p:+"+str(len5pdel)+","
start = start + len5pdel
if gen_strand != "-":
gen_start = gen_start + len5pdel
#del sub[0:len5pdel]
#del master_seq_bc[0:len5pdel]
#print("5p_del:" + d5p +"Len"+ str(len5pdel))
#cigar5pdel = str(len5pdel)+"D"
if iso_3p_add != "":
a3p = "".join(iso_3p_add[::-1])
a3p = a3p.replace("+","")
len3padd = len(a3p)
#variant+= "iso_3p:+"+str(len3padd)+","
pre_3pAdd = list(precursorSeq[end:end+len3padd])
try:
template3p=non_template3p=0
for e3pidx, each3ps in enumerate(a3p):
if each3ps == pre_3pAdd[e3pidx]:
template3p += 1
else:
non_template3p += 1
if template3p != 0:
variant+= "iso_3p:+"+str(template3p)+","
if non_template3p != 0:
variant+= "iso_add3p:+"+str(non_template3p)+","
except IndexError:
variant+= "iso_3p:+"+str(len3padd)+","
#iso_add3p
end = end + len3padd
gen_end = gen_end + len3padd
if gen_strand == "-":
gen_start-=len3padd
# If the addition is a SNV w.r.t precursor, then it will be => iso_add3p:N. Number of non-template nucleotides added at 3p.
#del sub[-len3padd:]
#del master_seq_bc[-len3padd:]
#print("3p_add:" + a3p + "Len"+ str(len3padd))
#cigar3padd = str(len3padd)+"I"
if iso_3p_del != "":
d3p = "".join(iso_3p_del[::-1])
len3pdel = len(d3p)
variant+= "iso_3p:-"+str(len3pdel)+","
end = end - len3pdel
gen_end = gen_end - len3pdel
if gen_strand == "-":
gen_start+=len3pdel
#del sub[-len3pdel:]
#del master_seq_bc[-len3pdel:]
#print("3p_del:" + d3p +"Len"+ str(len3pdel))
#cigar3pdel = str(len3pdel)+"D"
# Now these arrays for the reference and query have no changes left at the 5' or 3' ends, so any remaining variant corresponds to an internal change
#print(precursorSeq[start-1:end])
new_var_type={}
if iso_sub:
for xs in iso_sub.keys():
if xs == 7:
new_var_type["iso_snv_central_offset,"] = "1"
elif xs >= 1 and xs <= 6:
new_var_type["iso_snv_seed,"] = "1"
elif xs >= 8 and xs <= 12:
new_var_type["iso_snv_central,"] = "1"
elif xs >= 13 and xs <= 17:
new_var_type["iso_snv_central_supp,"] = "1"
else:
new_var_type["iso_snv,"] = "1"
variant+= "".join(new_var_type.keys())
#print(new_var_type)
if variant.endswith(","):
variant = re.sub(',$','',variant)
#print(variant)
"""
# PREPARING CIGAR BODY
"""
#print("Arun:")
#print(master_seq_bc)
#print("Patil:")
match_case = ""
for snv_id, snv in enumerate(master_seq_bc):
if snv == sub[snv_id]:
match_case+= "M"
elif snv == "-":
match_case+= "M"
#match_case+= "I"
elif sub[snv_id] == "_":
#match_case+= "D"
match_case+= "M"
else:
match_case+=master_seq_bc[snv_id] # 11MA7M to indicates there is a mismatch at position 12, where A is the reference nucleotide.
#match_case+=sub[snv_id]
## CREATING THE CIGAR FORMAT HERE ##
match_case = match_case.replace("+","")
#print(seq_master, match_case, seq_m)
count_4cigar=0
iso_cigar="" # This varialbe is actually CIGAR variable which collects CIGAR information
for isx, ist in enumerate(match_case):
if isx != 0:
if ist == match_case[isx-1]:
count_4cigar +=1
else:
if count_4cigar != 1:
iso_cigar += str(count_4cigar)+match_case[isx-1]
count_4cigar =1
else:
iso_cigar += match_case[isx-1]
count_4cigar =1
else:
count_4cigar +=1
if count_4cigar != 1:
iso_cigar += str(count_4cigar)+ist
else:
iso_cigar += ist
if "A" not in match_case and "T" not in match_case and "G" not in match_case and "C" not in match_case:
iso_cigar = str(len(seq_m))+"M"
else:
pass
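# Illustrative sketch of the custom CIGAR-like encoding built above (toy input, assumed values):
#   match_case = "M"*11 + "A" + "M"*7   ->   iso_cigar == "11MA7M"
# i.e. a single mismatch whose reference base is A at position 12; a match_case made purely of
# "M" is caught by the check above and collapses to str(len(seq_m)) + "M".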
#print(seq_m, iso_cigar)
if variant == "":
variant = "iso_snv"
iso_mi_var = seq_master+"\t"+version_db+"\t"+type_rna+"\t"+str(start)+"\t"+str(end)+"\t.\t+\t.\tRead="+seq_m+"; UID="+uid_val+"; Name="+ seq_master +"; Parent="+req_precursor_name+"; Variant="+variant+"; Cigar="+iso_cigar+"; Expression="+canonical_expression +"; Filter=Pass; Hits="+ canonical_expression + "\n"
gffwrite.write(iso_mi_var)
iovariant = re.sub(',',';',variant)
iovarlist = iovariant.split(";")
for iv in iovarlist:
if iv != "":
if ":" in iv:
iv = iv.split(":")[0]
try:
JS_variantType_dataDict[str(iv)] += int(JS_exprn_total)
except KeyError:
JS_variantType_dataDict[str(iv)] = int(JS_exprn_total)
#forJSgffData.write(str(seq_m)+","+seq_master+","+iovariant+","+str(canonical_expression)+","+JS_exprn_total+"\n")
valStr = ','.join([str(elem) for elem in iovarlist]) +"#"+str(canonical_expression)
try:
JS_hmap_iso_miRVar[seq_master].append(valStr)
except KeyError:
JS_hmap_iso_miRVar[seq_master] = [valStr]
try:
JS_hmap_iso_miRcounts[seq_master].append(canonical_expression.split(","))
except KeyError:
JS_hmap_iso_miRcounts[seq_master] = [canonical_expression.split(",")]
"""
FOR SAM/BAM FILE FORMAT: gen_chr, gen_start, gen_end
"""
bam_can_dict[seq_m] = str(gen_chr) +"\t"+ str(gen_start) +"\t"+ iso_cigar + "\t"+ gen_strand
#print(seq_m+"\t"+ str(gen_chr) +"\t"+ str(gen_start) +"\t"+ iso_cigar+"\n")
#print("--**END**--")
except KeyError:
pass
#print(seq_m+"\t"+seq_master)
#ACTGGCCTTGGAGTCAGAAGGC hsa-miR-378g
html_data.openDoChartJSD()
for xy, xz in JS_variantType_dataDict.items():
html_data.donutChartJSD(xy, xz)
html_data.closeDoChartJSD()
#print(JS_hmap_list_ref)
sum_JS_hmap_iso_miRcounts=dict()
#print(JS_hmap_iso_miRcounts)
for ji, jj in JS_hmap_iso_miRcounts.items():
new_list = [list(map(int, lst)) for lst in jj]
sum_JS_hmap_iso_miRcounts[ji] = [sum(i) for i in zip(*new_list)]
#print(sum_JS_hmap_iso_miRcounts)
#print(JS_hmap_iso_miRVar)
df_rpm = mirRPM_completeSet.reset_index()
for indx, name in enumerate(base_names):
# FIRST PICK TOP 40 isomiRs FROM EACH SAMPLE
tempDict = dict()
for td1, td2 in sum_JS_hmap_iso_miRcounts.items():
tempDict[td1] = td2[indx]
req_mir_names = sorted(df_rpm[['miRNA', name]].values.tolist(), key=lambda x: x[1], reverse=True)
#req_mir_names = sorted(df_rpm[['miRNA', name]].values.tolist(), key=lambda x: x[1], reverse=True)[:20]
only_AbundantmiRs = [ re.sub('/.*', '', item[0]) for item in req_mir_names]
abundant_miRsJS = []
for chkExts in only_AbundantmiRs:
if len(abundant_miRsJS) == 20:
break
try:
if JS_hmap_iso_miRVar[chkExts]:
abundant_miRsJS.append(chkExts)
except KeyError:
pass
req_mir_names = sorted(abundant_miRsJS)
#req_mir_names = sorted(only_AbundantmiRs)
#req_mir_names = sorted(tempDict, key=tempDict.get, reverse=True)[:20] # returns keys for top 40 miRNA expression (Only keys)
html_data.openisoHmapTop(name, req_mir_names)
for kindx, km in enumerate(req_mir_names):
#print(km, JS_hmap_iso_miRVar[km])
vary = dict()
var_list_items = ['iso_3p:-1', 'iso_3p:-2', 'iso_5p:+2','iso_5p:+1','iso_3p:+1','iso_add3p:+1','iso_3p:+2','iso_add3p:+2','iso_3p:+3', 'iso_add3p:+3', 'iso_3p:+4', 'iso_add3p:+4', 'iso_5p:-1', 'iso_add5p:+1', 'iso_5p:-2', 'iso_add5p:+2', 'iso_5p:-3', 'iso_add5p:+3', 'iso_5p:-4', 'iso_add5p:+4', 'iso_snv_seed', 'iso_snv_central_offset', 'iso_snv_central', 'iso_snv_central_supp','iso_snv']
for k_var in var_list_items: # Initializing dictionaries to zero to avoid try catch except block in the later
vary[k_var] = 0
for valsy in JS_hmap_iso_miRVar[km]:
lhs = valsy.split("#")
#print(valsy, lhs)
reqExprval = int(lhs[1].split(",")[indx])
reqVaritems = lhs[0].split(",")
for varkeys in reqVaritems:
#print(km, varkeys, reqExprval, valsy)
try:
vary[varkeys]+= int(reqExprval)
except KeyError:
vary[varkeys] = int(reqExprval)
iso_3pm1 = "%.1f" %math.log2(vary['iso_3p:-1']) if vary['iso_3p:-1'] > 0 else 0
iso_3pm2 = "%.1f" %math.log2(vary['iso_3p:-2']) if vary['iso_3p:-2'] > 0 else 0
iso_5pm1 = "%.1f" %math.log2(vary['iso_5p:+1']) if vary['iso_5p:+1'] > 0 else 0
iso_5pm2 = "%.1f" %math.log2(vary['iso_5p:+2']) if vary['iso_5p:+2'] > 0 else 0
iso_3pp1 = "%.1f" %math.log2(vary['iso_3p:+1'] + vary['iso_add3p:+1']) if (vary['iso_3p:+1'] + vary['iso_add3p:+1']) > 0 else 0
iso_3pp2 = "%.1f" %math.log2(vary['iso_3p:+2'] + vary['iso_add3p:+2']) if (vary['iso_3p:+2'] + vary['iso_add3p:+2']) > 0 else 0
iso_3pp3 = "%.1f" %math.log2(vary['iso_3p:+3'] + vary['iso_add3p:+3']) if (vary['iso_3p:+3'] + vary['iso_add3p:+3']) > 0 else 0
iso_3pp4 = "%.1f" %math.log2(vary['iso_3p:+4'] + vary['iso_add3p:+4']) if (vary['iso_3p:+4'] + vary['iso_add3p:+4']) > 0 else 0
iso_5pp1 = "%.1f" %math.log2(vary['iso_5p:-1'] + vary['iso_add5p:+1']) if (vary['iso_5p:-1'] + vary['iso_add5p:+1']) > 0 else 0
iso_5pp2 = "%.1f" %math.log2(vary['iso_5p:-2'] + vary['iso_add5p:+2']) if (vary['iso_5p:-2'] + vary['iso_add5p:+2']) > 0 else 0
iso_5pp3 = "%.1f" %math.log2(vary['iso_5p:-3'] + vary['iso_add5p:+3']) if (vary['iso_5p:-3'] + vary['iso_add5p:+3']) > 0 else 0
iso_5pp4 = "%.1f" %math.log2(vary['iso_5p:-4'] + vary['iso_add5p:+4']) if (vary['iso_5p:-4'] + vary['iso_add5p:+4']) > 0 else 0
iso_3pp5 = 0
iso_5pp5 = 0
for item_3p in [x for x in vary.keys() if "iso_3p:+" in x]:
if 'iso_3p:+1' not in item_3p and 'iso_3p:+2' not in item_3p and 'iso_3p:+3' not in item_3p and 'iso_3p:+4' not in item_3p:
iso_3pp5 += int(vary[item_3p])
for item_add3p in [ xa for xa in vary.keys() if "iso_add3p:+" in xa]:
if 'iso_add3p:+1' not in item_add3p and 'iso_add3p:+2' not in item_add3p and 'iso_add3p:+3' not in item_add3p and 'iso_add3p:+4' not in item_add3p:
iso_3pp5 += int(vary[item_add3p])
for item_5p in [x for x in vary.keys() if "iso_5p:-" in x]:
if 'iso_5p:-1' not in item_5p and 'iso_5p:-2' not in item_5p and 'iso_5p:-3' not in item_5p and 'iso_5p:-4' not in item_5p:
iso_5pp5 += int(vary[item_5p])
for item_add5p in [ xa for xa in vary.keys() if "iso_add5p:+" in xa]:
if 'iso_add5p:+1' not in item_add5p and 'iso_add5p:+2' not in item_add5p and 'iso_add5p:+3' not in item_add5p and 'iso_add5p:+4' not in item_add5p:
iso_5pp5 += int(vary[item_add5p])
iso_3pp5 = "%.1f" %math.log2(iso_3pp5) if iso_3pp5 > 0 else 0
iso_5pp5 = "%.1f" %math.log2(iso_5pp5) if iso_5pp5 > 0 else 0
try:
ref_val = "%.1f" %math.log2(int(list(JS_hmap_list_ref[km])[indx])) if int(list(JS_hmap_list_ref[km])[indx]) > 0 else 0
except KeyError:
ref_val = 0
snv_1 = "%.1f" %math.log2(vary['iso_snv_seed']) if vary['iso_snv_seed'] > 0 else 0
snv_2 = "%.1f" %math.log2(vary['iso_snv_central_offset']) if vary['iso_snv_central_offset'] > 0 else 0
snv_3 = "%.1f" %math.log2(vary['iso_snv_central']) if vary['iso_snv_central'] > 0 else 0
snv_4 = "%.1f" %math.log2(vary['iso_snv_central_supp']) if vary['iso_snv_central_supp'] > 0 else 0
snv_5 = "%.1f" %math.log2(vary['iso_snv']) if vary['iso_snv'] > 0 else 0
iso_data_js = "["+ str(kindx) + ",0," + str(iso_3pp5) + "],[" + str(kindx) + ",1," + str(iso_3pp4) + "],[" + str(kindx) + ",2," + str(iso_3pp3) + "],[" + str(kindx) + ",3," + str(iso_3pp2) + "],[" + str(kindx) + ",4," + str(iso_3pp1) + "],[" + str(kindx) + ",5," + str(ref_val) + "],[" + str(kindx) + ",6," + str(iso_3pm1) + "],[" + str(kindx) + ",7," + str(iso_3pm2) + "],[" + str(kindx) + ",8," + str(snv_1) + "],[" + str(kindx) + ",9," + str(snv_2) + "],[" + str(kindx) + ",10," + str(snv_3) + "],[" + str(kindx) + ",11," + str(snv_4) + "],[" + str(kindx) + ",12," + str(snv_5) + "],[" + str(kindx) + ",13," + str(iso_5pm2) + "],[" + str(kindx) + ",14," + str(iso_5pm1) + "],[" + str(kindx) + ",15," + str(iso_5pp1) + "],[" + str(kindx) + ",16," + str(iso_5pp2) + "],[" + str(kindx) + ",17," + str(iso_5pp3) + "],[" + str(kindx) + ",18," + str(iso_5pp4) + "],[" + str(kindx) + ",19," + str(iso_5pp5) + "]"
html_data.isoHmapData(iso_data_js)
#print(km, vary, iso_3pp1)
#print(iso_data_js)
html_data.closeisoHmapBottom()
if args.bam_out:
mirna_samFile = Path(workDir)/"miRge3_miRNA.sam"
genC=0
genS=0
cig=0
with open(mirna_samFile) as miSam:
for mi_idx, mi_sam in enumerate(miSam):
mi_sam = mi_sam.strip()
mi_sam_list = mi_sam.split("\t")
try:
#if bam_can_dict[mi_sam_list[0]]:
sam_exprn_list = bam_expression_dict[mi_sam_list[0]]
for ex_idx, exprn in enumerate(sam_exprn_list):
(genC, genS, cig, strand) = bam_can_dict[mi_sam_list[0]].split("\t")
if exprn >= 1:
file_sam_name = str(base_names[ex_idx]) +".sam"
sam_name = Path(workDir)/file_sam_name
xbam = open(sam_name, "a+")
for numexp in range(exprn):
#print()
#readname = "r"+str(mi_idx) + "_" + str(numexp)
readname = mi_sam_list[0] + "_" + str(numexp)
phredQual = "I"*len(mi_sam_list[0])
cigar = str(len(mi_sam_list[0]))+"M"
#xbamout = readname+"\t"+mi_sam_list[1]+"\t"+genC+"\t"+str(genS)+"\t"+mi_sam_list[4]+"\t"+mi_sam_list[5]+"\t"+mi_sam_list[6]+"\t"+mi_sam_list[7]+"\t"+mi_sam_list[8]+"\t"+mi_sam_list[9]+"\t"+mi_sam_list[10]+"\n"
if strand == "+":
xbamout = readname+"\t"+mi_sam_list[1]+"\t"+genC+"\t"+str(genS)+"\t"+mi_sam_list[4]+"\t"+cigar+"\t"+mi_sam_list[6]+"\t"+mi_sam_list[7]+"\t"+mi_sam_list[8]+"\t"+mi_sam_list[0]+"\t"+phredQual+"\n"
else:
xbamout = readname+"\t"+mi_sam_list[1]+"\t"+genC+"\t"+str(genS)+"\t"+mi_sam_list[4]+"\t"+cigar+"\t"+mi_sam_list[6]+"\t"+mi_sam_list[7]+"\t"+mi_sam_list[8]+"\t"+mi_sam_list[0][::-1]+"\t"+phredQual+"\n"
xbam.write(xbamout)
xbam.close()
except KeyError:
pass
def addDashNew(seq, totalLength, start, end):
newSeq = '-'*(start-1)+seq+'-'*(totalLength-end)
return newSeq
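# Example (illustrative): pad a fragment out to its parent length using 1-based start/end
# coordinates, e.g. addDashNew("ACGT", 10, 3, 6) returns '--ACGT----'.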
def trfTypes(seq, tRNAName, start, trnaStruDic):
if 'pre_' not in tRNAName:
tRNASeq = trnaStruDic[tRNAName]['seq']
tRNAStru = trnaStruDic[tRNAName]['stru']
tRNASeqLen = len(tRNASeq)
# anticodonStart and anticodonEnd are 1-based positions, so convert them to 0-based
anticodonStart = trnaStruDic[tRNAName]['anticodonStart']-1
anticodonEnd = trnaStruDic[tRNAName]['anticodonEnd']-1
if start == 0:
if start+len(seq) == tRNASeqLen:
trfType = 'tRF-whole'
elif start+len(seq)-1 >= anticodonStart-2 and start+len(seq)-1 <= anticodonStart+1:
trfType = "5'-half"
else:
trfType = "5'-tRF"
else:
if start+len(seq)-1 >= tRNASeqLen-1-2 and start+len(seq)-1 <= tRNASeqLen-1:
if start >= anticodonStart-1 and start <= anticodonStart+2:
trfType = "3'-half"
else:
trfType = "3'-tRF"
else:
trfType = 'i-tRF'
else:
trfType = 'tRF-1'
return trfType
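# Illustrative sketch of how trfTypes() classifies fragments. The structure entry below is an
# assumed toy record; real entries are parsed from the <organism>_trna.str file further down.
#   toy_stru = {'tRNA-Toy-1': {'seq': 'G' * 76, 'stru': '.' * 33 + 'XXX' + '.' * 40,
#                              'anticodonStart': 34, 'anticodonEnd': 36}}
#   trfTypes('G' * 76, 'tRNA-Toy-1', 0,  toy_stru)  -> 'tRF-whole'
#   trfTypes('G' * 35, 'tRNA-Toy-1', 0,  toy_stru)  -> "5'-half"  (ends near the anticodon)
#   trfTypes('G' * 20, 'tRNA-Toy-1', 0,  toy_stru)  -> "5'-tRF"
#   trfTypes('G' * 26, 'tRNA-Toy-1', 50, toy_stru)  -> "3'-tRF"   (reaches the 3' end)
#   trfTypes('G' * 30, 'tRNA-Toy-1', 10, toy_stru)  -> 'i-tRF'
# Any tRNA name containing 'pre_' is reported as 'tRF-1'.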
def summarize(args, workDir, ref_db,base_names, pdMapped, sampleReadCounts, trimmedReadCounts, trimmedReadCountsUnique):
"""
THIS FUNCTION IS CALLED FIRST FROM miRge3.0 TO SUMMARIZE THE OUTPUT.
"""
global html_data
html_data = FormatJS(workDir)
ca_thr = float(args.crThreshold)
mfname = args.organism_name + "_merges_" + ref_db + ".csv"
mergeFile = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/mfname
if args.spikeIn:
col_headers = ['hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','spike-in']
col_vars = ['hmir','mtrna','pmtrna','snorna','rrna','ncrna','mrna','spikein']
else:
col_headers = ['hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA']
col_vars = ['hmir','mtrna','pmtrna','snorna','rrna','ncrna','mrna']
empty_list=dict() #Actually this is a dictionary, to collect dictionary of `sample names` as keys and `sum of expression` as values for each element of col_vars. Sorry for naming it _list.
for element, col_in in enumerate(col_headers):
for file_name in base_names:
if col_vars[element] in empty_list:
empty_list[col_vars[element]].update({file_name:pdMapped[pdMapped[col_in].astype(bool)][file_name].values.sum()})
else:
empty_list[col_vars[element]] = {file_name:pdMapped[pdMapped[col_in].astype(bool)][file_name].values.sum()}
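# After this loop empty_list maps each RNA class to a per-sample read-count dict, e.g.
# (sample names and counts assumed for illustration):
#   {'hmir':  {'sampleA': 1234, 'sampleB': 987},   # reads flagged as hairpin miRNA
#    'mtrna': {'sampleA': 456,  'sampleB': 321},   # reads flagged as mature tRNA
#    ...}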
"""
BEGINNING OF THE EXACT miRNA AND isomiR PROCESSING
"""
mirMergedNameDic={}
mirMergedDataframeDic={}
try:
print('Openning FILE:', mergeFile)
with open(mergeFile, "r") as merge_file:
for line in merge_file:
line_content = line.strip().split(',')
for item in line_content[1:]:
mirMergedNameDic.update({item:line_content[0]})
mirMergedDataframeDic.update({line_content[0]:"1"})
except FileNotFoundError:
print('FILE not found:', mergeFile)
pass
#allSequences = pdMapped.index.shape[0]
#print(allSequences)
pdMapped = pdMapped.reset_index(level=['Sequence'])
subpdMapped = pdMapped[(pdMapped['exact miRNA'].astype(bool) | pdMapped['isomiR miRNA'].astype(bool))]
cannonical = pdMapped[pdMapped['exact miRNA'].astype(bool)]
isomirs = pdMapped[pdMapped['isomiR miRNA'].astype(bool)]
cannonical_4ie = cannonical
isomirs_4ie = isomirs
cannonical_4gff = cannonical
isomirs_4gff = isomirs
#### MOVED THE TWO IF CONDITIONS (args.gff_out or args.bam_out, and args.bam_out) HERE FROM THE LINE BELOW
if args.spikeIn:
cannonical = cannonical.drop(columns=['Sequence','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','isomiR miRNA','spike-in'])
isomirs = isomirs.drop(columns=['Sequence','exact miRNA','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','spike-in'])
cannonical_4ie = cannonical_4ie.drop(columns=['hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','isomiR miRNA','spike-in'])
isomirs_4ie = isomirs_4ie.drop(columns=['exact miRNA','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','spike-in'])
subpdMapped = subpdMapped.drop(columns=['Sequence','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','spike-in'])
else:
cannonical = cannonical.drop(columns=['Sequence','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','isomiR miRNA'])
isomirs = isomirs.drop(columns=['Sequence','exact miRNA','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag'])
cannonical_4ie = cannonical_4ie.drop(columns=['hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag','isomiR miRNA'])
isomirs_4ie = isomirs_4ie.drop(columns=['exact miRNA','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag'])
subpdMapped = subpdMapped.drop(columns=['Sequence','hairpin miRNA','mature tRNA','primary tRNA','snoRNA','rRNA','ncrna others','mRNA','annotFlag'])
subpdMapped['miRNA_cbind'] = subpdMapped[['exact miRNA', 'isomiR miRNA']].apply(lambda x: ''.join(x), axis = 1)
subpdMapped['miRNA_fin'] = subpdMapped['miRNA_cbind'].map(mirMergedNameDic)
subpdMapped = subpdMapped.fillna(0)
subpdMapped.loc[subpdMapped.miRNA_fin == 0, 'miRNA_fin'] = subpdMapped.miRNA_cbind
subpdMapped.set_index('miRNA_cbind',inplace = True)
cannonical.set_index('exact miRNA',inplace = True)
isomirs.set_index('isomiR miRNA',inplace = True)
cann_collapse = cannonical.groupby(['exact miRNA']).sum()[base_names]
iso_collapse = isomirs.groupby(['isomiR miRNA']).sum()[base_names]
cann_collapse = cann_collapse.reset_index(level=['exact miRNA'])
iso_collapse = iso_collapse.reset_index(level=['isomiR miRNA'])
df = pd.DataFrame(cann_collapse['exact miRNA'].tolist(), columns = ['exact miRNA'])
for file_name in base_names:
df = mirge_can(cann_collapse, iso_collapse, df, ca_thr, file_name)
df['miRNA'] = df['exact miRNA'].map(mirMergedNameDic)
df = df.fillna(0)
df.loc[df.miRNA == 0, 'miRNA'] = df['exact miRNA']
df.set_index('miRNA',inplace = True)
df.drop(columns=['exact miRNA'])
df = df.groupby(['miRNA']).sum()[base_names]
#df = df.loc[(df.sum(axis=1) != 0)] # THIS WILL ELIMINATE ROWS WHOSE SUM ACROSS SAMPLES IS ZERO
Filtered_miRNA_Reads = df.sum(axis = 0, skipna = True)[base_names]
Filtered_miRNA_Reads = Filtered_miRNA_Reads.to_dict()
miR_RPM = (df.div(df.sum(axis=0))*1000000).round(4)
miRNA_df = subpdMapped.groupby(['miRNA_cbind']).sum()[base_names]
sumTotal = miRNA_df.sum(axis = 0, skipna = True)
l_1d = sumTotal.to_dict()
miRgefileToCSV = Path(workDir)/"miR.Counts.csv"
miRgeRPMToCSV = Path(workDir)/"miR.RPM.csv"
indexName = str(args.organism_name) + '_mirna_' + str(ref_db)
indexFiles = Path(args.libraries_path)/args.organism_name/"index.Libs"/indexName
bwtCommand = Path(args.bowtie_path)/"bowtie-inspect" if args.bowtie_path else "bowtie-inspect"
bwtExec = str(bwtCommand) + " -n " + str(indexFiles)
#bwtExec = "bowtie-inspect -n /home/arun/repositories/Project_120919/mirge/Libs/human/index.Libs/human_mirna_miRBase"
print("[CMD:]", bwtExec)
bowtie = subprocess.run(str(bwtExec), shell=True, check=True, stdout=subprocess.PIPE, text=True, stderr=subprocess.PIPE, universal_newlines=True)
if bowtie.returncode==0:
bwtOut = bowtie.stdout
bwtErr = bowtie.stderr
lines = bwtOut.strip()
for srow in lines.split('\n'):
if srow not in mirMergedNameDic:
mirMergedDataframeDic.update({srow:"1"})
mirMerged_df = pd.DataFrame(list(mirMergedDataframeDic.keys()),columns = ['miRNA']) #Contains all the miRNA including those that is not expressed
mirMerged_df.set_index('miRNA',inplace = True)
mirCounts_completeSet = mirMerged_df.join(df, how='outer').fillna(0)
mirRPM_completeSet = mirMerged_df.join(miR_RPM, how='outer').fillna(0)
#df.to_csv(miRgefileToCSV)
#miR_RPM.to_csv(miRgeRPMToCSV)
mirCounts_completeSet.to_csv(miRgefileToCSV)
mirRPM_completeSet.to_csv(miRgeRPMToCSV)
if args.gff_out or args.bam_out:
pre_mirDict = dict()
mirDict = dict()
filenamegff = workDir/"sample_miRge3.gff"
maturefname = args.organism_name + "_mature_" + ref_db + ".fa"
pre_fname = args.organism_name + "_hairpin_" + ref_db
fasta_file = Path(args.libraries_path)/args.organism_name/"fasta.Libs"/maturefname
precursor_file = Path(args.libraries_path)/args.organism_name/"index.Libs"/pre_fname
annotation_pre_fname = args.organism_name+"_"+ref_db+".gff3"
annotation_lib = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/annotation_pre_fname
bwtCommand = Path(args.bowtie_path)/"bowtie-inspect" if args.bowtie_path else "bowtie-inspect"
bwtExec = str(bwtCommand) + " -a 20000 -e "+ str(precursor_file)
print("[CMD:]", bwtExec)
bowtie = subprocess.run(str(bwtExec), shell=True, check=True, stdout=subprocess.PIPE, text=True, stderr=subprocess.PIPE, universal_newlines=True)
#READING PRECURSOR miRNA SEQUENCES INFORMATION IN A DICTIONARY (pre_mirDict)
if bowtie.returncode==0:
bwtOut = bowtie.stdout
bwtErr = bowtie.stderr
for srow in bwtOut.split('\n'):
if '>' in srow:
srow = srow.replace(">","")
headmil = srow.split(" ")[0]
#if "MirGeneDB" in ref_db:
# headmil = headmil.split("_")[0]
else:
#headmil = '-'.join(headmil.split('-')[:-1])
pre_mirDict[headmil] = srow
#READING MATURE miRNA SEQUENCES INFORMATION IN A DICTIONARY (mirDict)
with open(fasta_file) as mir:
for mil in mir:
mil = mil.strip()
if '>' in mil:
headmil_mi = mil.replace(">","")
#if "MirGeneDB" in ref_db:
# headmil_mi = headmil_mi.split("_")[0]
else:
mirDict[headmil_mi] = mil
d = Differ()
create_gff(args, pre_mirDict, mirDict, d, filenamegff, cannonical_4gff, isomirs_4gff, base_names, ref_db, annotation_lib, workDir, mirRPM_completeSet)
if args.bam_out:
pd_frame = ['snoRNA','rRNA','ncrna others','mRNA']
bwt_idx_prefname = ['snorna','rrna','ncrna_others','mrna']
for igv_idx, igv_name in enumerate(pd_frame):
dfRNA2sam = pdMapped[pdMapped[igv_name].astype(bool)]
pre_cols_birth = ["Sequence", igv_name]
cols1 = pre_cols_birth + base_names
df_sam_out = pd.DataFrame(dfRNA2sam, columns= cols1) # Gives list of list containg Sequence, RNA type, expression values for the samples
df_expr_list = df_sam_out.values.tolist()
rna_type = args.organism_name + "_" + bwt_idx_prefname[igv_idx]
index_file_name = Path(args.libraries_path)/args.organism_name/"index.Libs"/rna_type
bow2bam(args, workDir, ref_db, df_expr_list, base_names, index_file_name, rna_type, bwt_idx_prefname[igv_idx])
createBAM(args, workDir, base_names)
#https://stackoverflow.com/questions/35125062/how-do-i-join-2-columns-of-a-pandas-data-frame-by-a-comma
miRNA_counts={}
trimmed_counts={}
for file_name in base_names:
numOfRows = df.index[df[file_name] > 0].shape[0]
mapped_rows = pdMapped.index[pdMapped[file_name] > 0].shape[0]
mirna_dict = {file_name:numOfRows}
miRNA_counts.update(mirna_dict)
"""
END OF THE EXACT miRNA AND isomiR PROCESSING
"""
trimmed_counts = trimmedReadCountsUnique
if args.spikeIn:
pre_summary = {'Total Input Reads':sampleReadCounts,'Trimmed Reads (all)':trimmedReadCounts,'Trimmed Reads (unique)':trimmed_counts,'All miRNA Reads':l_1d,'Filtered miRNA Reads':Filtered_miRNA_Reads,'Unique miRNAs':miRNA_counts, 'Hairpin miRNAs':empty_list[col_vars[0]],'mature tRNA Reads':empty_list[col_vars[1]],'primary tRNA Reads':empty_list[col_vars[2]],'snoRNA Reads':empty_list[col_vars[3]],'rRNA Reads':empty_list[col_vars[4]],'ncRNA others':empty_list[col_vars[5]],'mRNA Reads':empty_list[col_vars[6]],'Spike-in':empty_list[col_vars[7]]}
col_tosum = ['All miRNA Reads','Hairpin miRNAs','mature tRNA Reads','primary tRNA Reads','snoRNA Reads','rRNA Reads','ncRNA others','mRNA Reads','Spike-in']
colRearrange = ['Total Input Reads', 'Trimmed Reads (all)','Trimmed Reads (unique)','All miRNA Reads','Filtered miRNA Reads','Unique miRNAs','Hairpin miRNAs','mature tRNA Reads','primary tRNA Reads','snoRNA Reads','rRNA Reads','ncRNA others','mRNA Reads','Spike-in','Remaining Reads']
else:
pre_summary = {'Total Input Reads':sampleReadCounts,'Trimmed Reads (all)':trimmedReadCounts,'Trimmed Reads (unique)':trimmed_counts,'All miRNA Reads':l_1d,'Filtered miRNA Reads':Filtered_miRNA_Reads,'Unique miRNAs':miRNA_counts, 'Hairpin miRNAs':empty_list[col_vars[0]],'mature tRNA Reads':empty_list[col_vars[1]],'primary tRNA Reads':empty_list[col_vars[2]],'snoRNA Reads':empty_list[col_vars[3]],'rRNA Reads':empty_list[col_vars[4]],'ncRNA others':empty_list[col_vars[5]],'mRNA Reads':empty_list[col_vars[6]]}
col_tosum = ['All miRNA Reads','Hairpin miRNAs','mature tRNA Reads','primary tRNA Reads','snoRNA Reads','rRNA Reads','ncRNA others','mRNA Reads']
colRearrange = ['Total Input Reads', 'Trimmed Reads (all)','Trimmed Reads (unique)','All miRNA Reads','Filtered miRNA Reads','Unique miRNAs','Hairpin miRNAs','mature tRNA Reads','primary tRNA Reads','snoRNA Reads','rRNA Reads','ncRNA others','mRNA Reads','Remaining Reads']
"""
Calculate isomiR entropy
"""
def calcEntropy(inputList):
sum1 = sum(inputList)
entropy = 0
for i in range(len(inputList)):
if inputList[i] > 1:
freq = float(inputList[i])/sum1
entropy = entropy + -1*freq*math.log(freq, 2)
return entropy
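# Examples (illustrative): calcEntropy([8, 8]) == 1.0 (two equally expressed isomiRs, 1 bit)
# and calcEntropy([16]) == 0.0 (a single isomiR). Counts of 0 or 1 are skipped by the `> 1`
# check above, so they never contribute to the entropy.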
def create_ie(args, cannonical, isomirs, base_names, workDir, Filtered_miRNA_Reads):
isomirFile = Path(workDir)/"isomirs.csv"
isomirSampleFile = Path(workDir)/"isomirs.samples.csv"
outf1 = open(isomirFile, 'w')
outf2 = open(isomirSampleFile, 'w')
outf1.write('miRNA,sequence')
outf2.write('miRNA')
for i in range(len(base_names)):
outf1.write(','+base_names[i])
outf2.write(','+base_names[i]+' isomir+miRNA Entropy')
outf2.write(','+base_names[i]+' Canonical Sequence')
outf2.write(','+base_names[i]+' Canonical RPM')
outf2.write(','+base_names[i]+' Top Isomir RPM')
outf1.write(',Entropy\n')
outf2.write('\n')
pre_cols1 = ["Sequence","exact miRNA"]
pre_cols2 = ["Sequence","isomiR miRNA"]
cols1 = pre_cols1 + base_names
cols2 = pre_cols2 + base_names
can_gff_df = pd.DataFrame(cannonical, columns= cols1) # Gives list of list containg Sequence, miRNA name, expression values for the samples - ref miRNA
iso_gff_df = pd.DataFrame(isomirs, columns= cols2) # Gives list of list containg Sequence, miRNA name, expression values for the samples - isomiR
canonical_gff = can_gff_df.values.tolist()
isomir_gff = iso_gff_df.values.tolist()
freq_list=[]
for fname in base_names:
try:
freq_list.append(1000000/Filtered_miRNA_Reads[fname])
except ZeroDivisionError:
freq_list.append(0)
maxEntropy = math.log(len(base_names), 2)
"""
Collecting miRNA values across each sample into an array
"""
miR_can = {}
for each_can in canonical_gff:
canValScore = each_can[2:]
if ".SNP" in each_can[1]:
each_can[1] = each_can[1].split('.')[0]
try:
miR_can[each_can[1]].append(canValScore)
except KeyError:
miR_can[each_can[1]] = [canValScore]
"""
Collecting miRNA values across each sample into an array; here the values for each sample are summed
"""
for key_mir, val_mir in miR_can.items():
res = [sum(i) for i in zip(*val_mir)]
miR_can[key_mir] = res
miR_iso = {}
for each_isoSeq in isomir_gff:
valueScore = each_isoSeq[2:]
entropy = calcEntropy(valueScore)
if maxEntropy == 0:
entropy= "NA"
else:
entropy = str(entropy/maxEntropy)
emptyListEntropy = []
#topIsomir = []
#isomirSum = []
for idxn, ival in enumerate(valueScore):
emptyListEntropy.append(str(ival*freq_list[idxn]))
#topIsomir.append(str(max(valueScore)*rpmFactor))
#isomirSum.append(str(sum(sampleIsomirs[sampleLane])*rpmFactor))
samplesEntropy = "\t".join(emptyListEntropy)
if ".SNP" in each_isoSeq[1]:
each_isoSeq[1] = each_isoSeq[1].split('.')[0]
try:
miR_iso[each_isoSeq[1]].append(valueScore)
except KeyError:
miR_iso[each_isoSeq[1]] = [valueScore]
outf1.write(each_isoSeq[1]+"\t"+each_isoSeq[0]+"\t"+ samplesEntropy +"\t"+ entropy + "\n")
for isokey, isoval in miR_iso.items():
#print(list(zip(*isoval)))
res = [i for i in zip(*isoval)]
isomirOut = [isokey]
for xn, x in enumerate(res):
iso_vals_asList =list(x)
topIsomir = max(iso_vals_asList)*freq_list[xn]
isomirSum = sum(iso_vals_asList)*freq_list[xn]
if isokey in miR_can:
iso_can_vals_list = iso_vals_asList + [miR_can[isokey][xn]]
miRNARPM = miR_can[isokey][xn] * freq_list[xn]
sampleEntropyWithmiRNA = calcEntropy(iso_can_vals_list)
maxEntropy = len(iso_vals_asList)
if maxEntropy > 1:
sampleEntropyWithmiRNA = str(sampleEntropyWithmiRNA/(math.log(maxEntropy,2)))
else:
sampleEntropyWithmiRNA = 'NA'
isomirOut.append(sampleEntropyWithmiRNA)
combined = miRNARPM + isomirSum
if combined >0:
isomirOut.append(str(100.0*miRNARPM/combined))
else:
isomirOut.append('NA')
isomirOut.append(str(miRNARPM))
isomirOut.append(str(topIsomir))
else:
pass
#print(list(x))
if len(isomirOut) > 1:
outf2.write(','.join(isomirOut))
outf2.write('\n')
outf1.close()
outf2.close()
#print(isomirOut)
#print(isokey, isoval)
#print(res)
#print(each_isoSeq)
# ['AAAAAACTCTAAACAA', 'hsa-miR-3145-5p', 0, 1]
if args.isoform_entropy:
create_ie(args, cannonical_4ie, isomirs_4ie, base_names, workDir, Filtered_miRNA_Reads)
if args.AtoI:
reqCols = ['miRNA']+base_names
mirCounts_completeSet = mirCounts_completeSet.reset_index(level=['miRNA'])
mirCC = pd.DataFrame(mirCounts_completeSet, columns= reqCols).values.tolist()
mirDic={}
for mC in mirCC:
mirDic[mC[0]] = mC[1:]
pre_cols1 = ["Sequence"]
cols1 = pre_cols1 + base_names
cols2 = pre_cols1 + base_names
can_ai_df = pd.DataFrame(cannonical_4ie, columns= cols1) # Gives list of list containg Sequence, miRNA name, expression values for the samples - ref miRNA
iso_ai_df = pd.DataFrame(isomirs_4ie, columns= cols2) # Gives list of list containg Sequence, miRNA name, expression values for the samples - isomiR
canonical_ai = can_ai_df.values.tolist()
onlyCannon = canonical_ai
onlyCanmiRNA={}
for oC in onlyCannon:
onlyCanmiRNA[oC[0]] = oC[1:]
isomir_ai = iso_ai_df.values.tolist()
canonical_ai.extend(isomir_ai)
seqDic={}
for sD in canonical_ai:
seqDic[sD[0]] = sD[1:]
#print(seqDic)
a2i_editing(args, cannonical_4ie, isomirs_4ie, base_names, workDir, Filtered_miRNA_Reads, mirMergedNameDic, mirDic, ref_db, seqDic, onlyCanmiRNA)
pass
if args.tRNA_frag:
m_trna_pre = pdMapped[pdMapped['mature tRNA'].astype(bool)]
p_trna_pre = pdMapped[pdMapped['primary tRNA'].astype(bool)]
m_trna_cols1 = ["Sequence","mature tRNA"] + base_names
p_trna_cols2 = ["Sequence","primary tRNA"] + base_names
m_trna = pd.DataFrame(m_trna_pre, columns= m_trna_cols1).values.tolist() # Gives list of list containg Sequence, mature tRNA, expression values for the samples - mature tRNA
p_trna = pd.DataFrame(p_trna_pre, columns= p_trna_cols2).values.tolist() # Gives list of list containg Sequence, primary tRNA, expression values for the samples - primary tRNA
trnaStruDic={}
fname = args.organism_name+'_trna.str'
trna_stru_file = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/fname
allFiles_inPlace = 1
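# The .str file is expected to hold three-line records, sketched here with assumed values:
#   >tRNA-Gly-GCC-1
#   GCATTGGTGGTTCAGTGGTAGAATTCTCGCC...
#   (((((((..((((........XXX........
# where the literal 'XXX' in the structure line marks the anticodon, read as a 1-based position.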
try:
with open(trna_stru_file, 'r') as inf:
a=0
lines = inf.readlines()
for idx, xline in enumerate(lines):
xline=xline.strip()
if xline.startswith(">"):
trnaName = xline.replace(">","")
a+=1
elif a == 1:
a+=1
trnaSeq = xline
elif a == 2:
a=0
trnaStru = xline
anticodonStart = trnaStru.index('XXX')+1
anticodonEnd = anticodonStart+2
trnaStruDic.update({trnaName:{'seq':trnaSeq, 'stru':trnaStru, 'anticodonStart':anticodonStart, 'anticodonEnd':anticodonEnd}})
except IOError:
allFiles_inPlace = 0
print(f"File {trna_stru_file} does not exist!!\nProceeding the annotation with out -trf\n")
fname2 = args.organism_name+'_trna_aminoacid_anticodon.csv'
trna_aa_anticodon_file = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/fname2
trnaAAanticodonDic = {}
try:
with open(trna_aa_anticodon_file, 'r') as inf:
for line in inf:
contentTmp = line.strip().split(',')
trnaAAanticodonDic.update({contentTmp[0]:{'aaType':contentTmp[1], 'anticodon':contentTmp[2]}})
except IOError:
allFiles_inPlace = 0
print(f"File {trna_aa_anticodon_file} does not exist!!\nProceeding the annotation with out -trf\n")
fname3 = args.organism_name+'_trna_deduplicated_list.csv'
trna_duplicated_list_file = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/fname3
duptRNA2UniqueDic = {}
try:
with open(trna_duplicated_list_file, 'r') as inf:
line = inf.readline()
line = inf.readline()
while line != '':
contentTmp = line.strip().split(',')
for item in contentTmp[1].split('/'):
duptRNA2UniqueDic.update({item.strip():contentTmp[0].strip()})
line = inf.readline()
except IOError:
allFiles_inPlace = 0
print(f"File {trna_duplicated_list_file} does not exist!!\nProceeding the annotation with out -trf\n")
fname4 = args.organism_name+'_tRF_infor.csv'
tRF_infor_file = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/fname4
tRNAtrfDic = {}
try:
with open(tRF_infor_file, 'r') as inf:
line = inf.readline()
line = inf.readline()
while line != '':
content = line.strip().split(',')
tRNAName = content[0].split('_Cluster')[0]
tRNAClusterName = content[0]
seq = content[4]
tRNAlength = len(content[5])
start = int(content[3].split('-')[0])
end = int(content[3].split('-')[1])
if tRNAName not in tRNAtrfDic.keys():
tRNAtrfDic.update({tRNAName:{}})
tRNAtrfDic[tRNAName].update({addDashNew(seq, tRNAlength, start, end):tRNAClusterName})
line = inf.readline()
except IOError:
allFiles_inPlace = 0
print(f"File {tRF_infor_file} does not exist!!\nProceeding the annotation with out -trf\n")
# Load the predefined tRF merge file
fname5 = args.organism_name+"_tRF_merges.csv"
tRF_merge_file = Path(args.libraries_path)/args.organism_name/"annotation.Libs"/fname5
trfMergedNameDic = {}
trfMergedList = []
try:
with open(tRF_merge_file, 'r') as inf:
for line in inf:
tmp = line.strip().split(',')
mergedName = tmp[0]
trfMergedList.append(mergedName)
for item in tmp[1].split('/'):
trfMergedNameDic.update({item:mergedName})
except IOError:
allFiles_inPlace = 0
print(f"File {tRF_merge_file} does not exist!!\nProceeding the annotation with out -trf\n")
pretrnaNameSeqDic = {}
file_pre_tRNA = args.organism_name+'_pre_trna'
indexFiles = Path(args.libraries_path)/args.organism_name/"index.Libs"/file_pre_tRNA
bwtCommand = Path(args.bowtie_path)/"bowtie-inspect" if args.bowtie_path else "bowtie-inspect"
bwtExec = str(bwtCommand) +" -a 20000 -e "+ str(indexFiles)
print("[CMD:]", bwtExec)
bowtie = subprocess.run(str(bwtExec), shell=True, check=True, stdout=subprocess.PIPE, text=True, stderr=subprocess.PIPE, universal_newlines=True)
#READING PRECURSOR tRNA SEQUENCES INTO A DICTIONARY (pretrnaNameSeqDic)
if bowtie.returncode==0:
bwtOut2 = bowtie.stdout
for srow in bwtOut2.split('\n'):
if srow != "":
if '>' in srow:
header = srow.replace(">","")
else:
pretrnaNameSeqDic.update({header:str(srow)})
else:
allFiles_inPlace = 0
# Deal with the alignment of mature tRNA
#alignmentResult[content[0]].append((content[2], content[1], content[3], content[5]))
if allFiles_inPlace == 1:
m_trna.extend(p_trna)
trfContentDic = {}
for mtrna_item in m_trna:
trfContentDic.update({mtrna_item[0]:{'count':mtrna_item[2:]}})
if "N" not in mtrna_item[0]:
trfContentDic[mtrna_item[0]]['uid'] = UID(mtrna_item[0], "tRF") # It is the Unique identifier, this function is present miRgeEssential.py
else:
trfContentDic[mtrna_item[0]]['uid'] = "."
# Open sam file for the mapped mature tRNA
matureMappedtRNA = workDir/"miRge3_tRNA.sam"
with open(matureMappedtRNA, "r") as minf:
for mline in minf:
mline=mline.strip()
#AAAACATCAGATTGTGAGTC 0 trnaMT_HisGTG_MT_+_12138_12206 18 255 20M * 0 0 AAAACATCAGATTGTGAGTC IIIIIIIIIIIIIIIIIIII XA:i:1 MD:Z:17A2 NM:i:1 XM:i:2
item = mline.split("\t")
startTmp = int(item[3])-1
trfContentDic[item[0]][item[2]] = {}
trfContentDic[item[0]][item[2]]['start'] = startTmp
trfContentDic[item[0]][item[2]]['end'] = startTmp+len(item[0])-1
trfContentDic[item[0]][item[2]]['cigar'] = 'undifined'
trfContentDic[item[0]][item[2]]['tRFType'] = trfTypes(item[0], item[2], startTmp, trnaStruDic)
primaryMappedtRNA = workDir/"miRge3_pre_tRNA.sam"
with open(primaryMappedtRNA, "r") as minf:
for mline in minf:
mline=mline.strip()
item = mline.split("\t")
startTmp = int(item[3])-1
trfContentDic[item[0]][item[2]] = {}
trfContentDic[item[0]][item[2]]['start'] = startTmp
trfContentDic[item[0]][item[2]]['cigar'] = 'undifined'
trfContentDic[item[0]][item[2]]['tRFType'] = trfTypes(item[0], item[2], startTmp, trnaStruDic)
# Only end coordinate will change
cutRemainderSeqLen = re.search('T{3,}$', item[0]).span(0)[0]
lenPostTrimming = len(item[0])-cutRemainderSeqLen ## Length after trimming the sequences at end for more than 3 TTT's
trfContentDic[item[0]][item[2]]['end'] = startTmp+len(item[0])-1-lenPostTrimming
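# Illustration of the poly-T trimming above (assumed read): for item[0] == 'ACGTTTTT',
# re.search('T{3,}$', ...).span(0)[0] is 3, so lenPostTrimming == 5 trailing T's are removed and
# 'end' lands on the last non-T base. A read without a trailing run of three or more T's would
# make re.search() return None here, so this assumes pre-tRNA reads always carry the poly-T tail.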
## CALLING EXTERNAL FUNCTION FROM miRge2 TO OUTPUT THE tRF RESULT FILES
mature_tRNA_Reads_values = list(empty_list[col_vars[1]].values())
primary_tRNA_Reads_values = list(empty_list[col_vars[2]].values())
trna_deliverables(args, workDir, pretrnaNameSeqDic, trfContentDic, mature_tRNA_Reads_values, primary_tRNA_Reads_values, trnaAAanticodonDic, base_names, trnaStruDic, duptRNA2UniqueDic, trfMergedList, tRNAtrfDic, trfMergedNameDic)
#pretrnaNameSeqDic
summary = | pd.DataFrame.from_dict(pre_summary) | pandas.DataFrame.from_dict |
#
# DATA EXTRACTED FROM:
#
# FREIRE, F.H.M.A; <NAME>; <NAME>. Projeção populacional municipal
# com estimadores bayesianos, Brasil 2010 - 2030. In: <NAME> (coord.).
# Seguridade Social Municipais. Projeto Brasil 3 Tempos. Secretaria Especial
# de Assuntos Estratégicos da Presidência da República (SAE/SG/PR) , United
# Nations Development Programme, Brazil (UNDP) and International Policy Centre
# for Inclusive Growth. Brasília (IPC-IG), 2019
#
from pathlib import Path
import pandas as pd
PATH = Path(__file__).parent.resolve()
DEST = PATH / "processed"
def fix_columns(df, name):
"""
Create multi-index for male/female columns of age distributions
"""
df.columns = pd.MultiIndex.from_tuples(
((name, int(x)) for x in df.columns), names=["gender", "age"]
)
return df
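# Example (illustrative): for a frame whose columns are the ages "0".."100" as strings,
# fix_columns(df, "male") relabels them as the two-level index
#   MultiIndex([('male', 0), ('male', 1), ..., ('male', 100)], names=['gender', 'age'])
# so male and female distributions can later sit side by side in a single frame.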
# Read raw data and transform a few columns
data = pd.read_csv(PATH / "age-distribution.csv.gz")
data = data.drop(columns=["name", "state", "total"])
data["id"] = data.pop("code").apply(lambda x: f"BR-{x}")
data["95"] = data["100"] = 0
print("Raw data loaded")
###############################################################################
# Group by municipality and append two columns for male/female distributions
def T(df, gender):
df = (
df[df["gender"] == gender]
.set_index(["id", "year"])
.drop(columns="gender")
.sort_index()
.astype("int32")
)
data = ((gender, int(x)) for x in df.columns)
df.columns = pd.MultiIndex.from_tuples(data, names=["gender", "age"])
return df
data = data.replace({"f": "female", "m": "male"})
male = T(data, "male")
female = T(data, "female")
data = | pd.concat([female, male], axis=1) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure propagate to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems(), aka panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
| assert_frame_equal(a.ix[:, 22, [111, 333]], b) | pandas.util.testing.assert_frame_equal |
# The analyser
import pandas as pd
import matplotlib.pyplot as plt
import dill
import os
import numpy as np
from funcs import store_namespace
from funcs import load_namespace
import datetime
from matplotlib.font_manager import FontProperties
from matplotlib import rc
community = 'ResidentialCommunity'
sim_ids = ['MinEne_0-2']
model_id = 'R2CW_HP'
bldg_list = load_namespace(os.path.join('path to models', 'teaser_bldgs_residential'))
#
bldg_list = [bldg_list[0], bldg_list[1]]
print(bldg_list)
folder = 'results'
step = 300
nodynprice=0
mon = 'jan'
constr_folder = 'decentr_enemin_constr_'+mon
#bldg_list = bldg_list[0:1]
if mon == 'jan':
start = '1/7/2017 16:30:00'
end = '1/7/2017 19:00:00'
controlseq_time = '01/07/2017 16:55:00'
elif mon == 'mar':
start = '3/1/2017 16:30:00'
end = '3/1/2017 19:00:00'
controlseq_time = '03/01/2017 16:55:00'
elif mon=='nov':
start = '11/20/2017 16:30:00'
end = '11/20/2017 19:00:00'
controlseq_time = '11/20/2017 16:55:00'
sim_range = pd.date_range(start, end, freq = str(step)+'S')
simu_path = "path to simulation folder"
other_input = {}
price = {}
flex_cost = {}
ref_profile = {}
controlseq = {}
opt_control = {}
emutemps = {}
mpctemps = {}
opt_stats = {}
flex_down = {}
flex_up = {}
power = {}
for bldg in bldg_list:
building = bldg+'_'+model_id
for sim_id in sim_ids:
opt_stats[sim_id] = {}
controlseq[sim_id] = {}
mpctemps[sim_id] = {}
emutemps[sim_id] = {}
power[sim_id] = {}
for time_idx in sim_range:
time_idx = time_idx.strftime('%m/%d/%Y %H:%M:%S')
t = time_idx.replace('/','-').replace(':','-').replace(' ','-')
opt_stats[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'opt_stats_'+sim_id+'_'+t))
emutemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'emutemps_'+sim_id+'_'+t))
mpctemps[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'mpctemps_'+sim_id+'_'+t))
controlseq[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'controlseq_'+sim_id)+'_'+t)
power[sim_id][time_idx] = load_namespace(os.path.join(simu_path, folder, 'power_'+sim_id)+'_'+t)
#flex_down[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_down'+sim_id))
#flex_up[sim_id] = load_namespace(os.path.join(simu_path, folder, 'flex_up'+sim_id))
i=0
for sim_id in sim_ids:
if i == 0:
emutemps_df = | pd.DataFrame.from_dict(emutemps[sim_id],orient='index') | pandas.DataFrame.from_dict |
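# Illustrative sketch (not from the original analyser): how the per-simulation
# temperature frame is assembled from a dict keyed by timestamp via
# pd.DataFrame.from_dict(..., orient='index'); the values here are made up.
import pandas as pd
emu_sample = {
"01/07/2017 16:55:00": {"bldg1_R2CW_HP": 21.3, "bldg2_R2CW_HP": 20.8},
"01/07/2017 17:00:00": {"bldg1_R2CW_HP": 21.1, "bldg2_R2CW_HP": 20.9},
}
emutemps_sample_df = pd.DataFrame.from_dict(emu_sample, orient="index")
emutemps_sample_df.index = pd.to_datetime(emutemps_sample_df.index)
print(emutemps_sample_df)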
"""Unittests for the `methods` module."""
import unittest
import pandas as pd
from pandas_data_cleaner import strategies
class TestRemoveDuplicates(unittest.TestCase):
"""Unittests for the `RemoveDuplicates` class."""
def test_invalid_options(self):
"""Test that when no options are provided, the `can_use_cleaner` method
indicates that there is an error.
"""
strategy = strategies.RemoveDuplicates(pd.DataFrame({'a': [1, 2, 3]}))
can_use, missing_options = strategy.can_use_cleaner()
self.assertFalse(can_use)
self.assertEqual(len(missing_options), 2)
def test_clean_keep_last(self):
"""Test that the `clean` method removes duplicates where `keep` is set
to `last`.
"""
dataframe = pd.DataFrame(
{
"id": [1, 2, 1],
"name": ["a", "a", "a"],
"email": ['<EMAIL>', '<EMAIL>', '<EMAIL>'],
"age": [1, 2, 1],
"active": [True, True, False], # Note: This is the only change
}
)
strategy = strategies.RemoveDuplicates(
dataframe,
remove_duplicates_subset_fields=['id'],
remove_duplicates_keep='last'
)
strategy.clean()
results = strategy.dataframe.reset_index(drop=True)
expected_results = pd.DataFrame(
{
"id": [2, 1],
"name": ["a", "a"],
"email": ['<EMAIL>', '<EMAIL>'],
"age": [2, 1],
"active": [True, False]
}
)
self.assertTrue(
results.equals(expected_results),
f"\nActual:\n{results}\nExpected:\n{expected_results}",
)
def test_clean_keep_first(self):
"""Test that the `clean` method removes duplicates where `keep` is set
to `first`.
"""
dataframe = pd.DataFrame(
{
"id": [1, 2, 1],
"name": ["a", "a", "a"],
"email": ['<EMAIL>', '<EMAIL>', '<EMAIL>'],
"age": [1, 2, 1],
"active": [True, True, False], # Note: This is the only change
}
)
strategy = strategies.RemoveDuplicates(
dataframe,
remove_duplicates_subset_fields=['id'],
remove_duplicates_keep='first'
)
strategy.clean()
results = strategy.dataframe.reset_index(drop=True)
expected_results = pd.DataFrame(
{
"id": [1, 2],
"name": ["a", "a"],
"email": ['<EMAIL>', '<EMAIL>'],
"age": [1, 2],
"active": [True, True]
}
)
self.assertTrue(
results.equals(expected_results),
f"\nActual:\n{results}\nExpected:\n{expected_results}",
)
class TestRenameHeaders(unittest.TestCase):
"""Unittests for the `RenameHeaders` class."""
def test_invalid_options(self):
"""Test that when no options are provided, the `can_use_cleaner` method
indicates that there is an error.
"""
strategy = strategies.RenameHeaders( | pd.DataFrame({'a': [1, 2, 3]}) | pandas.DataFrame |
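# Illustrative sketch (not part of the test module): the behaviour these tests
# expect from RemoveDuplicates reduces to DataFrame.drop_duplicates with a
# subset and a keep policy; the frame below is hypothetical.
import pandas as pd
dupes = pd.DataFrame({"id": [1, 2, 1], "active": [True, True, False]})
kept_last = dupes.drop_duplicates(subset=["id"], keep="last").reset_index(drop=True)
kept_first = dupes.drop_duplicates(subset=["id"], keep="first").reset_index(drop=True)
print(kept_last)  # id 2 (True), then id 1 (False)
print(kept_first)  # id 1 (True), then id 2 (True)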
import sys
import time
import requests
import pandas as pd
import os
import numpy as np
name_dicc = {
'208': 'Energy (Kcal)',
'203': 'Protein(g)',
'204': 'Total Lipid (g)',
'255': 'Water (g)',
'307': 'Sodium(mg)',
'269': 'Total Sugar(g)',
'291': 'Fiber(g)',
'301': 'Calcium(mg)',
'303': 'Iron (mg)',
}
path = os.getcwd()
link = pd.read_csv(os.path.join(path, 'link.csv'), sep=",", dtype={'ndbno': object})
numbers = link['ndbno'].tolist()
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
it = chunks(numbers, 25)
arr = []
g100 = []
for _i, chunk in enumerate(it):
print(f"Progress: {_i*100/(len(numbers)/25) :.2f}%")
response = {
'api_key': 'API',
'ndbno': chunk,
'format': 'json',
}
req = requests.get('https://api.nal.usda.gov/ndb/V2/reports', response)
for fd in req.json()['foods']:
if 'food' not in fd:
continue
food = fd['food']
name = food['desc']['name']
ndbno = food['desc']['ndbno']
nut_dicc = {
'208': np.nan,
'203': np.nan,
'204': np.nan,
'255': np.nan,
'307': np.nan,
'269': np.nan,
'291': np.nan,
'301': np.nan,
'303': np.nan,
}
ver = True
for nutrient in food['nutrients']:
if nutrient['nutrient_id'] in nut_dicc and \
('measures' not in nutrient or len(nutrient['measures']) == 0 or nutrient['measures'] == [None]):
ver = False
if not ver:
g100 += [ndbno]
print(ndbno)
for nutrient in food['nutrients']:
if nutrient['nutrient_id'] in nut_dicc:
try:
if ver:
measure = nutrient['measures'][0]
nut_dicc[nutrient['nutrient_id']] = float(measure['value'])
else:
nut_dicc[nutrient['nutrient_id']] = float(nutrient['value'])
except Exception:
print(ndbno)
sys.exit(1)
ans = {'NDB_No': ndbno, 'USDA Name': name}
for key, value in nut_dicc.items():
ans[name_dicc[key]] = value
arr += [ans]
time.sleep(1)
df = | pd.DataFrame(arr) | pandas.DataFrame |
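# Illustrative sketch (not from the scraper): the batching performed by the
# chunks() generator above, written as a list comprehension -- the report
# endpoint is queried 25 ndbno values at a time; the IDs below are made up.
ids = [str(i).zfill(5) for i in range(101)]
batches = [ids[i:i + 25] for i in range(0, len(ids), 25)]
print(len(batches), [len(b) for b in batches])  # 5 [25, 25, 25, 25, 1]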
# Generate XML annotation files
import pandas as pd
from PIL import Image
data = pd.read_csv('data/train_labels.csv')
del data['AB']
data['temp'] = data['ID']
def save_xml(image_name, name_list, xmin_list, ymin_list, xmax_list, ymax_list):
xml_file = open('data/train_xml/' + image_name.split('.')[-2] + '.xml', 'w')
image_name = 'data/train_dataset/' + image_name
img = Image.open(image_name)
img_width = img.size[0]
img_height = img.size[1]
xml_file.write('<annotation>\n')
xml_file.write(' <folder>' + image_name.split('/')[-2] + '</folder>\n')
xml_file.write(' <filename>' + image_name.split('/')[-1] + '</filename>\n')
xml_file.write(' <path>' + image_name + '</path>\n')
xml_file.write(' <source>\n')
xml_file.write(' <database>Unknown</database>\n')
xml_file.write(' </source>\n')
xml_file.write(' <size>\n')
xml_file.write(' <width>' + str(img_width) + '</width>\n')
xml_file.write(' <height>' + str(img_height) + '</height>\n')
xml_file.write(' <depth>3</depth>\n')
xml_file.write(' </size>\n')
xml_file.write(' <segmented>0</segmented>\n')
data_panda = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Retrieve bikeshare trips data."""
# pylint: disable=invalid-name
import os
import re
from glob import glob
from typing import Dict, List
from zipfile import ZipFile
import pandas as pd
import pandera as pa
import requests
from src.utils import log_prefect
trips_schema = pa.DataFrameSchema(
columns={
"TRIP_ID": pa.Column(pa.Int),
"TRIP__DURATION": pa.Column(pa.Int),
"START_STATION_ID": pa.Column(
pa.Int,
nullable=True,
),
"START_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"START_STATION_NAME": pa.Column(pd.StringDtype()),
"END_STATION_ID": pa.Column(
pa.Int,
nullable=True,
),
"END_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"END_STATION_NAME": pa.Column(pd.StringDtype()),
"BIKE_ID": pa.Column(pa.Int, nullable=True),
"USER_TYPE": pa.Column(
pd.StringDtype(),
checks=[
pa.Check(
lambda s: s.isin(["Annual Member", "Casual Member"]),
)
],
),
},
index=pa.Index(pa.Int),
)
urls_schema = pa.DataFrameSchema(
columns={
"url": pa.Column(pd.StringDtype()),
"name": pa.Column(pd.StringDtype()),
"format": pa.Column(pd.StringDtype()),
"state": pa.Column(pd.StringDtype()),
},
index=pa.Index(pa.Int),
)
get_data_status_schema = pa.DataFrameSchema(
columns={
"trips_file_name": pa.Column(pd.StringDtype()),
"last_modified_opendata": pa.Column(
pd.DatetimeTZDtype(tz="America/Toronto")
),
"parquet_file_exists": pa.Column(pd.BooleanDtype()),
"parquet_file_outdated": pa.Column(pd.BooleanDtype()),
"downloaded_file": pa.Column(pd.BooleanDtype()),
},
index=pa.Index(pa.Int),
)
raw_trips_schema = pa.DataFrameSchema(
columns={
"TRIP_ID": pa.Column(pa.Int),
"TRIP__DURATION": pa.Column(pa.Int),
"START_STATION_ID": pa.Column(pa.Int, nullable=True),
"START_STATION_NAME": pa.Column(pd.StringDtype()),
"START_TIME": pa.Column(
pa.Timestamp,
checks=[pa.Check(lambda s: s.dt.year.isin([2021, 2022]))],
),
"USER_TYPE": pa.Column(
pd.StringDtype(),
checks=[
pa.Check(
lambda s: s.isin(["Annual Member", "Casual Member"]),
)
],
),
},
index=pa.Index(pa.Int),
)
def get_local_csv_list(
raw_data_dir: str, years_wanted: List[int], use_prefect: bool = False
) -> List[str]:
"""Getting list of local CSV data files."""
log_prefect("Getting list of local CSV data files...", True, use_prefect)
files_by_year = [glob(f"{raw_data_dir}/*{y}*.csv") for y in years_wanted]
csvs = sorted([f for files_list in files_by_year for f in files_list])
log_prefect("Done.", False, use_prefect)
return csvs
def get_ridership_data(
raw_data_dir: str,
url: str,
last_modified_timestamp: pd.Timestamp,
use_prefect: bool = False,
) -> Dict[str, str]:
"""Download bikeshare trips data."""
# Split URL to get the file name
file_name = os.path.basename(url)
year = os.path.splitext(file_name)[0].split("-")[-1]
zip_filepath = os.path.join(raw_data_dir, file_name)
destination_dir = os.path.abspath(os.path.join(zip_filepath, os.pardir))
# Check if previously downloaded contents are up-to-date
parquet_data_filepath = os.path.join(raw_data_dir, "agg_data.parquet.gzip")
has_parquet = os.path.exists(parquet_data_filepath)
if has_parquet:
parquet_file_modified_time = (
| pd.read_parquet(parquet_data_filepath) | pandas.read_parquet |
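# Illustrative sketch (not part of the module): how a pandera schema like
# trips_schema above is typically applied -- validate() returns the frame on
# success and raises SchemaError on violations; toy schema and data only.
import pandas as pd
import pandera as pa
toy_schema = pa.DataFrameSchema({
"TRIP_ID": pa.Column(pa.Int),
"USER_TYPE": pa.Column(pd.StringDtype(), checks=[pa.Check(lambda s: s.isin(["Annual Member", "Casual Member"]))]),
})
toy = pd.DataFrame({"TRIP_ID": [1, 2], "USER_TYPE": pd.array(["Annual Member", "Casual Member"], dtype="string")})
print(toy_schema.validate(toy).dtypes)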
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
## Functions
def preprocess_cat_columns(data):
data["Education"] = data["Education"].map({1:"Below College", 2:"College", 3:"Bachelor", 4:"Master",5:"Doctor"})
# attrition_df["EnvironmentSatisfaction"] = attrition_df["EnvironmentSatisfaction"].map({1:"Low", 2:"Medium", 3:"High", 4:"Very High"})
data["JobInvolvement"] = data["JobInvolvement"].map({1:"Low", 2:"Medium", 3:"High", 4:"Very High"})
# attrition_df["JobSatisfaction"] = attrition_df["JobSatisfaction"].map({1:"Low", 2:"Medium", 3:"High", 4:"Very High"})
# attrition_df["PerformanceRating"] = attrition_df["PerformanceRating"].map({1:"Low", 2:"Medium", 3:"High", 4:"Very High"})
# attrition_df["RelationshipSatisfaction"] = attrition_df["RelationshipSatisfaction"].map({1:"Low", 2:"Medium", 3:"High", 4:"Very High"})
# attrition_df["WorkLifeBalance"] = attrition_df["WorkLifeBalance"].map({1:"Bad", 2:"Good", 3:"Better", 4:"Best"})
return data
def num_pipeline_transformer(data):
numerics = ['int64']
num_attrs = data.select_dtypes(include=numerics)
num_pipeline = Pipeline([
('std_scaler', StandardScaler()),
])
return num_attrs, num_pipeline
def pipeline_transformer(data):
cat_attrs = ["Education", "JobInvolvement","BusinessTravel"]
# cat_attrs = ["BusinessTravel", "Department", "Education",
# "EducationField", "EnvironmentSatisfaction", "Gender",
# "JobInvolvement", "JobRole", "JobSatisfaction",
# "MaritalStatus", "OverTime", "PerformanceRating",
# "RelationshipSatisfaction", "WorkLifeBalance"]
num_attrs, num_pipeline = num_pipeline_transformer(data)
prepared_data = ColumnTransformer([
("num", num_pipeline, list(num_attrs)),
("cat", OneHotEncoder(), cat_attrs),
])
prepared_data.fit_transform(data)
return prepared_data
def predict_attrition(config, model):
if type(config) == dict:
df_prep = config.copy()
df = | pd.DataFrame(df_prep, index=[0]) | pandas.DataFrame |
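# Illustrative sketch (not from the original app): the pattern behind
# pipeline_transformer above -- scale numeric columns and one-hot encode the
# categorical ones in a single ColumnTransformer; column names are made up.
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
toy = pd.DataFrame({"Age": [34, 41, 29], "MonthlyIncome": [5200, 6100, 4300], "BusinessTravel": ["Travel_Rarely", "Travel_Frequently", "Non-Travel"]})
prep = ColumnTransformer([
("num", StandardScaler(), ["Age", "MonthlyIncome"]),
("cat", OneHotEncoder(handle_unknown="ignore"), ["BusinessTravel"]),
])
print(prep.fit_transform(toy).shape)  # (3, 5): 2 scaled numerics + 3 one-hot levels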
# author: <NAME>, <NAME>
# date: 2020-01-22
'''This script reads in 5 .csv files located in the <file_path_data> folder:
1. All accepted vanity plates
2. All rejected vanity plates
3. Combined rejected and undersampled accepted plates
4. Feature training data
5. Target training data
And produces 3 plots that best support the discussion in our
exploratory data analysis. Features are engineered from the
training feature set using scikit-learn's CountVectorizer.
.png images of created plots are exported to the <file_path_img>
folder.
Usage: scripts/03_EDA.py --file_path_raw=<file_path_data> --file_path_pro=<file_path_pro> --accepted_plates_csv=<accepted_plates_csv> --rejected_plates_csv=<rejected_plates_csv> --reduced_plate_csv=<reduced_plate_csv> --X_train_csv=<X_train_csv> --y_train_csv=<y_train_csv> --file_path_img=<file_path_img>
Options:
--file_path_raw=<file_path_data> Path to raw data folder of .csv files
--file_path_pro=<file_path_pro> Path to processed data folder
--accepted_plates_csv=<accepted_plates_csv> filename of .csv with all accepted plates
--rejected_plates_csv=<rejected_plates_csv> filename of .csv with all negative plates
--reduced_plate_csv=<reduced_plate_csv> filename of .csv of undersampled accepted plates combined with rejected plates
--X_train_csv=<X_train_csv> filename of .csv with training feature dataset
--y_train_csv=<y_train_csv> filename of .csv with training target dataset
--file_path_img=<file_path_img> filepath to folder where images should be stored
'''
# file_path_raw = 'data/raw/'
# file_path_pro='data/processed/'
# accepted_plates_csv = 'accepted_plates.csv'
# rejected_plates_csv = 'rejected_plates.csv'
# reduced_plate_csv = 'full_vanity_plate_data.csv'
# X_train_csv = 'X_train.csv'
# y_train_csv = 'y_train.csv'
# file_path_img = 'docs/imgs/'
# #### Exploratory data analysis (EDA)
#
# In this section we perform EDA of the given dataset and use it to answer the research question.
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
import altair as alt
from docopt import docopt
opt = docopt(__doc__)
def main(file_path_raw, file_path_pro, accepted_plates_csv, rejected_plates_csv, reduced_plate_csv, X_train_csv, y_train_csv, file_path_img):
# Load datasets
full_rejected = | pd.read_csv(file_path_raw + rejected_plates_csv) | pandas.read_csv |
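# Illustrative sketch (not part of the script): docopt turns a usage block like
# the docstring above into a plain dict keyed by option name; the trimmed usage
# string and paths below are hypothetical.
from docopt import docopt
usage = "Usage: eda.py --file_path_raw=<dir> --file_path_img=<dir>"
opt_demo = docopt(usage, argv=["--file_path_raw=data/raw/", "--file_path_img=docs/imgs/"])
print(opt_demo["--file_path_raw"], opt_demo["--file_path_img"])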
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import pandas as pd
from datetime import datetime, timedelta
from pytz import FixedOffset, timezone, utc
from random import randint
from enum import Enum
from sqlalchemy import create_engine, DateTime
from datetime import datetime
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL = "event_timestamp"
class EventTimestampType(Enum):
TZ_NAIVE = 0
TZ_AWARE_UTC = 1
TZ_AWARE_FIXED_OFFSET = 2
TZ_AWARE_US_PACIFIC = 3
def _convert_event_timestamp(event_timestamp: pd.Timestamp, t: EventTimestampType):
if t == EventTimestampType.TZ_NAIVE:
return event_timestamp
elif t == EventTimestampType.TZ_AWARE_UTC:
return event_timestamp.replace(tzinfo=utc)
elif t == EventTimestampType.TZ_AWARE_FIXED_OFFSET:
return event_timestamp.replace(tzinfo=utc).astimezone(FixedOffset(60))
elif t == EventTimestampType.TZ_AWARE_US_PACIFIC:
return event_timestamp.replace(tzinfo=utc).astimezone(timezone("US/Pacific"))
def create_orders_df(
customers,
drivers,
start_date,
end_date,
order_count,
infer_event_timestamp_col=False,
) -> pd.DataFrame:
"""
Example df generated by this function:
| order_id | driver_id | customer_id | order_is_success | event_timestamp |
+----------+-----------+-------------+------------------+---------------------+
| 100 | 5004 | 1007 | 0 | 2021-03-10 19:31:15 |
| 101 | 5003 | 1006 | 0 | 2021-03-11 22:02:50 |
| 102 | 5010 | 1005 | 0 | 2021-03-13 00:34:24 |
| 103 | 5010 | 1001 | 1 | 2021-03-14 03:05:59 |
"""
df = pd.DataFrame()
df["order_id"] = [order_id for order_id in range(100, 100 + order_count)]
df["driver_id"] = np.random.choice(drivers, order_count)
df["customer_id"] = np.random.choice(customers, order_count)
df["order_is_success"] = np.random.randint(0, 2, size=order_count).astype(np.int32)
if infer_event_timestamp_col:
df["e_ts"] = [
_convert_event_timestamp(
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms"),
EventTimestampType(3),
)
for idx, dt in enumerate(
pd.date_range(start=start_date, end=end_date, periods=order_count)
)
]
df.sort_values(
by=["e_ts", "order_id", "driver_id", "customer_id"], inplace=True,
)
else:
df[DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL] = [
_convert_event_timestamp(
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms"),
EventTimestampType(idx % 4),
)
for idx, dt in enumerate(
pd.date_range(start=start_date, end=end_date, periods=order_count)
)
]
df.sort_values(
by=[
DEFAULT_ENTITY_DF_EVENT_TIMESTAMP_COL,
"order_id",
"driver_id",
"customer_id",
],
inplace=True,
)
return df
def create_driver_hourly_stats_df(drivers, start_date, end_date) -> pd.DataFrame:
"""
Example df generated by this function:
| datetime | driver_id | conv_rate | acc_rate | avg_daily_trips | created |
|------------------+-----------+-----------+----------+-----------------+------------------|
| 2021-03-17 19:31 | 5010 | 0.229297 | 0.685843 | 861 | 2021-03-24 19:34 |
| 2021-03-17 20:31 | 5010 | 0.781655 | 0.861280 | 769 | 2021-03-24 19:34 |
| 2021-03-17 21:31 | 5010 | 0.150333 | 0.525581 | 778 | 2021-03-24 19:34 |
| 2021-03-17 22:31 | 5010 | 0.951701 | 0.228883 | 570 | 2021-03-24 19:34 |
| 2021-03-17 23:31 | 5010 | 0.819598 | 0.262503 | 473 | 2021-03-24 19:34 |
| | ... | ... | ... | ... | |
| 2021-03-24 16:31 | 5001 | 0.061585 | 0.658140 | 477 | 2021-03-24 19:34 |
| 2021-03-24 17:31 | 5001 | 0.088949 | 0.303897 | 618 | 2021-03-24 19:34 |
| 2021-03-24 18:31 | 5001 | 0.096652 | 0.747421 | 480 | 2021-03-24 19:34 |
| 2021-03-17 19:31 | 5005 | 0.142936 | 0.707596 | 466 | 2021-03-24 19:34 |
| 2021-03-17 19:31 | 5005 | 0.142936 | 0.707596 | 466 | 2021-03-24 19:34 |
"""
df_hourly = pd.DataFrame(
{
"datetime": [
pd.Timestamp(dt, unit="ms", tz="UTC").round("ms")
for dt in pd.date_range(
start=start_date, end=end_date, freq="1H", closed="left"
)
]
# include a fixed timestamp for get_historical_features in the quickstart
# + [
# pd.Timestamp(
# year=2021, month=4, day=12, hour=7, minute=0, second=0, tz="UTC"
# )
# ]
}
)
df_all_drivers = pd.DataFrame()
dates = df_hourly["datetime"].map(pd.Timestamp.date).unique()
for driver in drivers:
df_hourly_copy = df_hourly.copy()
df_hourly_copy["driver_id"] = driver
for date in dates:
df_hourly_copy.loc[
df_hourly_copy["datetime"].map(pd.Timestamp.date) == date,
"avg_daily_trips",
] = randint(10, 30)
df_all_drivers = | pd.concat([df_hourly_copy, df_all_drivers]) | pandas.concat |
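# Illustrative sketch (not part of the module), reusing the helpers defined
# above: what _convert_event_timestamp returns for each EventTimestampType
# variant, applied to one fixed timestamp.
import pandas as pd
ts = pd.Timestamp("2021-03-10 19:31:15")
for variant in EventTimestampType:
    print(variant.name, _convert_event_timestamp(ts, variant))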
import sys
import random as rd
import matplotlib
#matplotlib.use('Agg')
matplotlib.use('TkAgg') # revert above
import matplotlib.pyplot as plt
import os
import numpy as np
import glob
from pathlib import Path
from scipy.interpolate import UnivariateSpline
from scipy.optimize import curve_fit
import pickle
import pandas as pd
from findiff import FinDiff
from scipy.stats import chisquare
from scipy.stats import spearmanr
def powlaw(x, a, b) :
return np.power(10,a) * np.power(x, b)
def linlaw(x, a, b) :
return a + x * b
def curve_fit_log(xdata, ydata, sigma):
"""Fit data to a power law with weights according to a log scale"""
# Weights according to a log scale
# Apply fscalex
xdata_log = np.log10(xdata)
# Apply fscaley
ydata_log = np.log10(ydata)
sigma_log = np.log10(sigma)
# Fit linear
popt_log, pcov_log = curve_fit(linlaw, xdata_log, ydata_log,
sigma=sigma_log)
#print(popt_log, pcov_log)
# Apply fscaley^-1 to fitted data
ydatafit_log = np.power(10, linlaw(xdata_log, *popt_log))
# There is no need to apply fscalex^-1 as original data is already available
return (popt_log, pcov_log, ydatafit_log)
def big_data_plotter(data_frame, x_name, y_name, index, ax, label, colour, style, lw, figsize):
# plts big_data.data
data_table = data_frame['dfs'][index]
return data_table.plot(ax=ax, kind='line', x=x_name, y=y_name, label=label,
c=colour, style=style, lw=lw, figsize=figsize)
def clipped_h_data_plotter(data_frame, index):
# plts big_data.data
h_data = data_frame['dfs'][index]['Height [Mm]'].dropna()
x = h_data.index.values
k = 3 # cubic (degree-3) spline
n = len(h_data)
s = 1  # n - np.sqrt(2*n) # smoothing factor
spline_1 = UnivariateSpline(x, h_data, k=k, s=s).derivative(n=1)
sign_change_indx = np.where(np.diff(np.sign(spline_1(x))))[0]
if len(sign_change_indx)>1:
sign_change_indx = sign_change_indx[1]
else:
sign_change_indx = len(h_data)
return x[:sign_change_indx], h_data[:sign_change_indx]
def ballistic_flight(v0, g, t):
# assumes a perfectly vertical launch and consistent units
# v0-initial velocity
# g-gravitational acceleration
# t-np time array
x = v0*t
y = v0*t-0.5*g*t**2
y = np.where(y<0,0,y)
t_apex = v0/g
x_apex = v0*t_apex
y_apex = v0*t_apex-0.5*g*(t_apex)**2
return x, y, t_apex, x_apex, y_apex
i = 0
shuff = 0
SMALL_SIZE = 40
MEDIUM_SIZE = SMALL_SIZE+2
BIGGER_SIZE = MEDIUM_SIZE+2
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=18) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
#path_2_shared_drive = '/run/user/1001/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm/j'
path_2_shared_drive = '/run/user/1000/gvfs/smb-share:server=uosfstore.shef.ac.uk,share=shared/mhd_jet1/User/smp16fm/j'
#dir_paths = glob.glob('data/*')
#data set for paper
dir_paths = glob.glob('data/run3/*')
##dat srt for high dt
#dir_paths = glob.glob('data/high_dt/*')
# constants
unit_length = 1e9 # cm
DOMIAN = [5*unit_length, 3*unit_length]
unit_temperature = 1e6 # K
unit_numberdensity = 1e9 # cm^-3
g_cm3_to_kg_m3 = 1e3
dyne_cm2_to_Pa = 1e-1
cm_to_km = 1e-5
m_to_km = 1e-3
km_to_Mm = 1e-3
cm_to_Mm = 1e-8
s_to_min = 1/60
earth_g = 9.80665 #m s-2
sun_g = 28.02*earth_g*m_to_km # km s-2
unit_density = 2.3416704877999998E-015
unit_velocity = 11645084.295622544
unit_pressure = 0.31754922400000002
unit_magenticfield = 1.9976088799077159
unit_time = unit_length/unit_velocity
# I messed up time scaling on data collection
TIME_CORRECTION_FACTOR = 10/unit_time
unit_mass = unit_density*unit_length**3
unit_specific_energy = (unit_length/unit_time)**2
# options
# IMPORTANT TO CHANGE dt
dt = unit_time/20
#dt = unit_time/200 # high dt
plot_h_vs_t = False
plot_w_vs_t = True
plot_error_bars = False
plot_hmax_vs_B = True
plot_hmax_vs_A = True
power_law_fit = True
plot_hmax_vs_dt = True
data_check = False
sf = [0.60, 0.55, 0.5, 0.5]
plot_mean_w_vs_BAdt = True
lw = 3# 2.5#
# how to read pickels
max_h_data_set = pd.read_pickle(dir_paths[1])
big_data_set = | pd.read_pickle(dir_paths[0]) | pandas.read_pickle |
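# Illustrative sketch (not part of the analysis script): fitting a synthetic
# power law y = 3 * x**1.5 with the curve_fit_log helper defined above; the
# uncertainties are dummy values (kept > 1 so log10(sigma) stays positive).
import numpy as np
x_demo = np.linspace(1.0, 50.0, 40)
y_demo = 3.0 * x_demo**1.5
sigma_demo = np.full_like(y_demo, 1.1)
popt_log, pcov_log, yfit = curve_fit_log(x_demo, y_demo, sigma_demo)
print(popt_log)  # approximately [log10(3), 1.5]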
import boto3
import logging, os
import pandas as pd
from botocore.exceptions import ClientError
s3 = boto3.client('s3')
def upload_file(file_name, bucket, object_name=None):
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = os.path.basename(file_name)
try:
response = s3.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True
def add_record_ids(table_data):
revised_data = []
for record in table_data:
fields = record['fields']
airtable_id = record['id']
fields['airtable_id'] = airtable_id
revised_data.append(fields)
return revised_data
def format_linked_records(linked_fields,table_df,tables,my_base):
# This function replaces the array of Record IDs stored in Linked Record fields
# with an array of Record Names (aka the value in the primary field of the linked table) by:
# - Identifying the Linked Table and Primary Field
# - Retrieving the Linked Table Data in a DataFrame
# - Isolating the Record IDs of each linked record by exploding the original dataframe
# - Merging the Linked Table and Original Table DataFrames and Regrouping the table by Record ID
# - Deleting the Linked Column and renaming the merged column to match the expected schema.
# Table A
# Name Publish Date Author(s) [Linked Field]
# Through the Looking Glass 12/17/1871 ['rec12345678']
# All the President's Men 06/15/1974 ['rec09876543', 'rec546372829']
# Table B
# Author Birthdate Birthplace RecordId
# <NAME> 01/27/1832 London 'rec12345678'
# <NAME> 02/14/1944 Washington, DC 'rec09876543'
# <NAME> 03/26/1946 Geneva, IL 'rec546372829'
# Table A (After Formula)
# Name Publish Date Author(s) [Linked Field]
# Through the Looking Glass 12/17/1871 [Lewis Carroll]
# All the President's Men 06/15/1974 [<NAME>, <NAME>]
for field in linked_fields:
print(f"Formatting Linked Records for {field['name']} field")
# Find Linked Table and Field Names from Airtable Schema
linked_field_name = field['name']
linked_table_id = field['options']['linkedTableId'] # Get linked table
table_ids = [x['id'] for x in tables]
linked_table_index = table_ids.index(linked_table_id)
linked_table = tables[linked_table_index]
linked_table_fields = linked_table['fields']
linked_primary_field = linked_table_fields[0]['name'] # Get primary field of linked table
# Get Linked Table Data
linked_table_data_raw = my_base.all(linked_table_id,fields=linked_primary_field) # Get linked table data only with Primary Field (pyAirtable)
linked_table_data = add_record_ids(linked_table_data_raw) #Add Record IDs to the dataframe to compare between linked field and the linked table records
linked_table_df = pd.DataFrame(linked_table_data)
linked_table_df.columns = [f'{x}_linked_record_x' for x in linked_table_df.columns] # Change the name of the columns in the linked table so they don't overlap with column names in original table
# Format Data Frame
linked_df = table_df.explode(linked_field_name) # Because multiple linked records are stored as an array of record ids, we'll need to explode the table to isolate each value
linked_df = | pd.merge(linked_df,linked_table_df,how='left',left_on=linked_field_name,right_on=linked_table_df.columns[1]) | pandas.merge |
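# Illustrative sketch (not from the sync script): the explode-then-merge-then-
# regroup pattern described in the Table A / Table B comment above, applied to
# hypothetical frames with made-up record IDs and names.
import pandas as pd
books = pd.DataFrame({"Name": ["Book A", "Book B"], "Author(s)": [["rec1"], ["rec2", "rec3"]]})
authors = pd.DataFrame({"airtable_id_linked_record_x": ["rec1", "rec2", "rec3"], "Author_linked_record_x": ["Author One", "Author Two", "Author Three"]})
exploded = books.explode("Author(s)")
merged = exploded.merge(authors, how="left", left_on="Author(s)", right_on="airtable_id_linked_record_x")
regrouped = merged.groupby("Name", sort=False)["Author_linked_record_x"].agg(list).reset_index()
print(regrouped)  # Book A -> [Author One], Book B -> [Author Two, Author Three]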
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initialize the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties)
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_app_maxs")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = | pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt") | pandas.Series |
from __future__ import print_function
import os
import pandas as pd
import xgboost as xgb
import time
import shutil
from sklearn import preprocessing
from sklearn.cross_validation import train_test_split
import numpy as np
from sklearn.utils import shuffle
def archive_results(filename,results,algo,script):
"""
:type algo: basestring
:type script: basestring
:type results: DataFrame
"""
#assert results == pd.DataFrame
now=time.localtime()[0:5]
dirname='../archive'
subdirfmt='%4d-%02d-%02d-%02d-%02d'
subdir=subdirfmt %now
if not os.path.exists(os.path.join(dirname,str(algo))):
os.mkdir(os.path.join(dirname,str(algo)))
dir_to_create=os.path.join(dirname,str(algo),subdir)
if not os.path.exists(dir_to_create):
os.mkdir(dir_to_create)
os.chdir(dir_to_create)
results.to_csv(filename,index=False,float_format='%.6f')
shutil.copy2(script,'.')
return
###############################################################################################
def preprocess_data(train,test):
y=train['is_screener']
id_test=test['patient_id']
train=train.drop(['patient_id','is_screener'],axis=1)
test=test.drop(['patient_id'],axis=1)
for f in train.columns:
if train[f].dtype == 'object':
print(f)
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[f].values) + list(test[f].values))
train[f] = lbl.transform(list(train[f].values))
test[f] = lbl.transform(list(test[f].values))
return id_test,test,train,y
os.chdir('/home/cuoco/KC/cervical-cancer-screening/src')
trainfile=('../input/patients_train.csv.gz')
testfile=('../input/patients_test.csv.gz')
train=pd.read_csv(trainfile,low_memory=False )
test=pd.read_csv(testfile,low_memory=False )
train_ex_file=('../input/train_patients_to_exclude.csv.gz')
train_ex=pd.read_csv(train_ex_file,low_memory=False)
train=train[train.patient_id.isin(train_ex.patient_id)==False]
test_ex_file=('../input/test_patients_to_exclude.csv.gz')
test_ex=pd.read_csv(test_ex_file,low_memory=False)
test=test[test.patient_id.isin(test_ex.patient_id)==False]
print(train.shape,test.shape)
surgical=pd.read_csv('../features/surgical_pap.csv.gz')
diagnosis=pd.read_csv('../features/diagnosis_hpv.csv.gz')
procedure_cervi=pd.read_csv('../features/procedure_cervi.csv.gz')
procedure_hpv=pd.read_csv('../features/procedure_hpv.csv.gz')
procedure_vaccine=pd.read_csv('../features/procedure_vaccine.csv.gz')
procedure_vagi=pd.read_csv('../features/procedure_vagi.csv.gz')
procedure_plan_type=pd.read_csv('../features/procedure_plan_type.csv.gz')
rx_payment=pd.read_csv('../features/rx_payment.csv.gz')
train_pract_screen_ratio=pd.read_csv('../features/train_pract_screen_ratio.csv.gz')
test_pract_screen_ratio=pd.read_csv('../features/test_pract_screen_ratio.csv.gz')
visits=pd.read_csv('../features/visits.csv.gz')
diagnosis_train_counts=pd.read_csv('../features/train_diagnosis_cbsa_counts.csv.gz')
#print (diagnosis_train_counts.shape)
#print(np.unique(len(diagnosis_train_counts['patient_id'])))
diagnosis_test_counts=pd.read_csv('../features/test_diagnosis_cbsa_counts.csv.gz')
state_screen_percent=pd.read_csv('../features/state_screen_percent.csv')
days_supply_distribution=pd.read_csv('../features/days_supply_distribution.csv')
surgical_procedure_type_code_counts_train= | pd.read_csv('../features/surgical_procedure_type_code_counts_train.csv.gz') | pandas.read_csv |
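# Illustrative sketch (not part of the training script): the label-encoding loop
# in preprocess_data above, shown on one hypothetical object column so train and
# test share a single consistent integer mapping.
import pandas as pd
from sklearn import preprocessing
train_col = pd.Series(["NY", "CA", "TX", "CA"])
test_col = pd.Series(["TX", "NY", "WA"])
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train_col.values) + list(test_col.values))
print(lbl.transform(list(train_col.values)))  # [1 0 2 0]
print(lbl.transform(list(test_col.values)))   # [2 1 3]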
"""Class to read and store all the data from the bucky input graph."""
import datetime
import logging
import warnings
from functools import partial
import networkx as nx
import pandas as pd
from joblib import Memory
from numpy import RankWarning
from ..numerical_libs import sync_numerical_libs, xp
from ..util.cached_prop import cached_property
from ..util.extrapolate import interp_extrap
from ..util.power_transforms import YeoJohnson
from ..util.read_config import bucky_cfg
from ..util.rolling_mean import rolling_mean, rolling_window
from ..util.spline_smooth import fit, lin_reg
from .adjmat import buckyAij
memory = Memory(bucky_cfg["cache_dir"], verbose=0, mmap_mode="r")
@memory.cache
def cached_scatter_add(a, slices, value):
"""scatter_add() thats cached by joblib."""
ret = a.copy()
xp.scatter_add(ret, slices, value)
return ret
class buckyGraphData:
"""Contains and preprocesses all the data imported from an input graph file."""
# pylint: disable=too-many-public-methods
@staticmethod
@sync_numerical_libs
def clean_historical_data(cum_case_hist, cum_death_hist, inc_hosp, start_date, g_data, force_save_plots=False):
"""Preprocess the historical data to smooth it and remove outliers."""
n_hist = cum_case_hist.shape[1]
adm1_case_hist = g_data.sum_adm1(cum_case_hist)
adm1_death_hist = g_data.sum_adm1(cum_death_hist)
adm1_diff_mask_cases = (
xp.around(xp.diff(adm1_case_hist, axis=1, prepend=adm1_case_hist[:, 0][..., None]), 2) >= 1.0
)
adm1_diff_mask_death = (
xp.around(xp.diff(adm1_death_hist, axis=1, prepend=adm1_death_hist[:, 0][..., None]), 2) >= 1.0
)
adm1_enough_case_data = (adm1_case_hist[:, -1] - adm1_case_hist[:, 0]) > n_hist
adm1_enough_death_data = (adm1_death_hist[:, -1] - adm1_death_hist[:, 0]) > n_hist
adm1_enough_data = adm1_enough_case_data | adm1_enough_death_data
valid_adm1_mask = adm1_diff_mask_cases | adm1_diff_mask_death
valid_adm1_case_mask = valid_adm1_mask
valid_adm1_death_mask = valid_adm1_mask
for i in range(adm1_case_hist.shape[0]):
data = adm1_case_hist[i]
rw = rolling_window(data, 3, center=True)
mask = xp.around(xp.abs((data - xp.mean(lin_reg(rw, return_fit=True), axis=1)) / data), 2) < 0.1
valid_adm1_case_mask[i] = valid_adm1_mask[i] & mask
valid_case_mask = valid_adm1_case_mask[g_data.adm1_id]
valid_death_mask = valid_adm1_death_mask[g_data.adm1_id]
enough_data = adm1_enough_data[g_data.adm1_id]
new_cum_cases = xp.empty(cum_case_hist.shape)
new_cum_deaths = xp.empty(cum_case_hist.shape)
x = xp.arange(0, new_cum_cases.shape[1])
for i in range(new_cum_cases.shape[0]):
try:
with warnings.catch_warnings():
warnings.simplefilter("error")
if ~enough_data[i]:
new_cum_cases[i] = cum_case_hist[i]
new_cum_deaths[i] = cum_death_hist[i]
continue
new_cum_cases[i] = interp_extrap(
x,
x[valid_case_mask[i]],
cum_case_hist[i, valid_case_mask[i]],
n_pts=7,
order=2,
)
new_cum_deaths[i] = interp_extrap(
x,
x[valid_death_mask[i]],
cum_death_hist[i, valid_death_mask[i]],
n_pts=7,
order=2,
)
except (TypeError, RankWarning, ValueError) as e:
logging.error(e)
# TODO remove massive outliers here, they lead to gibbs-like wiggling in the cumulative fitting
new_cum_cases = xp.around(new_cum_cases, 6) + 0.0 # plus zero to convert -0 to 0.
new_cum_deaths = xp.around(new_cum_deaths, 6) + 0.0
# Apply spline smoothing
df = max(1 * n_hist // 7 - 1, 4)
alp = 1.5
tol = 1.0e-5 # 6
gam_inc = 2.4 # 8.
gam_cum = 2.4 # 8.
# df2 = int(10 * n_hist ** (2.0 / 9.0)) + 1 # from gam book section 4.1.7
gam_inc = 8.0 # 2.4 # 2.4
gam_cum = 8.0 # 2.4 # 2.4
# tol = 1e-3
spline_cum_cases = xp.clip(
fit(
new_cum_cases,
df=df,
alp=alp,
gamma=gam_cum,
tol=tol,
label="PIRLS Cumulative Cases",
standardize=False,
),
a_min=0.0,
a_max=None,
)
spline_cum_deaths = xp.clip(
fit(
new_cum_deaths,
df=df,
alp=alp,
gamma=gam_cum,
tol=tol,
label="PIRLS Cumulative Deaths",
standardize=False,
),
a_min=0.0,
a_max=None,
)
inc_cases = xp.clip(xp.gradient(spline_cum_cases, axis=1, edge_order=2), a_min=0.0, a_max=None)
inc_deaths = xp.clip(xp.gradient(spline_cum_deaths, axis=1, edge_order=2), a_min=0.0, a_max=None)
inc_cases = xp.around(inc_cases, 6) + 0.0
inc_deaths = xp.around(inc_deaths, 6) + 0.0
inc_hosp = xp.around(inc_hosp, 6) + 0.0
# power_transform1 = BoxCox()
# power_transform2 = BoxCox()
# power_transform3 = BoxCox()
power_transform1 = YeoJohnson()
power_transform2 = YeoJohnson()
power_transform3 = YeoJohnson()
# Need to clip negatives for BoxCox
# inc_cases = xp.clip(inc_cases, a_min=0., a_max=None)
# inc_deaths = xp.clip(inc_deaths, a_min=0., a_max=None)
# inc_hosp = xp.clip(inc_hosp, a_min=0., a_max=None)
inc_cases = power_transform1.fit(inc_cases)
inc_deaths = power_transform2.fit(inc_deaths)
inc_hosp2 = power_transform3.fit(inc_hosp)
inc_cases = xp.around(inc_cases, 6) + 0.0
inc_deaths = xp.around(inc_deaths, 6) + 0.0
inc_hosp = xp.around(inc_hosp, 6) + 0.0
inc_fit_args = {
"alp": alp,
"df": df, # df // 2 + 2 - 1,
"dist": "g",
"standardize": False, # True,
"gamma": gam_inc,
"tol": tol,
"clip": (0.0, None),
"bootstrap": False, # True,
}
all_cached = (
fit.check_call_in_cache(inc_cases, **inc_fit_args)
and fit.check_call_in_cache(inc_deaths, **inc_fit_args)
and fit.check_call_in_cache(inc_hosp2, **inc_fit_args)
)
spline_inc_cases = fit(
inc_cases,
**inc_fit_args,
label="PIRLS Incident Cases",
)
spline_inc_deaths = fit(
inc_deaths,
**inc_fit_args,
label="PIRLS Incident Deaths",
)
spline_inc_hosp = fit(
inc_hosp2,
**inc_fit_args,
label="PIRLS Incident Hospitalizations",
)
for _ in range(5):
resid = spline_inc_cases - inc_cases
stddev = xp.quantile(xp.abs(resid), axis=1, q=0.682)
clean_resid = xp.clip(resid / (6.0 * stddev[:, None] + 1e-8), -1.0, 1.0)
robust_weights = xp.clip(1.0 - clean_resid ** 2.0, 0.0, 1.0) ** 2.0
spline_inc_cases = fit(inc_cases, **inc_fit_args, label="PIRLS Incident Cases", w=robust_weights)
resid = spline_inc_deaths - inc_deaths
stddev = xp.quantile(xp.abs(resid), axis=1, q=0.682)
clean_resid = xp.clip(resid / (6.0 * stddev[:, None] + 1e-8), -1.0, 1.0)
robust_weights = xp.clip(1.0 - clean_resid ** 2.0, 0.0, 1.0) ** 2.0
spline_inc_deaths = fit(inc_deaths, **inc_fit_args, label="PIRLS Incident Deaths", w=robust_weights)
resid = spline_inc_hosp - inc_hosp2
stddev = xp.quantile(xp.abs(resid), axis=1, q=0.682)
clean_resid = xp.clip(resid / (6.0 * stddev[:, None] + 1e-8), -1.0, 1.0)
robust_weights = xp.clip(1.0 - clean_resid ** 2.0, 0.0, 1.0) ** 2.0
spline_inc_hosp = fit(inc_hosp2, **inc_fit_args, label="PIRLS Incident Hosps", w=robust_weights)
spline_inc_cases = power_transform1.inv(spline_inc_cases)
spline_inc_deaths = power_transform2.inv(spline_inc_deaths)
spline_inc_hosp = power_transform3.inv(spline_inc_hosp)
# Only plot if the fits arent in the cache already
# TODO this wont update if doing a historical run thats already cached
save_plots = (not all_cached) or force_save_plots
if save_plots:
# pylint: disable=import-outside-toplevel
import matplotlib
matplotlib.use("agg")
import pathlib
import matplotlib.pyplot as plt
import numpy as np
import tqdm
import us
# TODO we should drop these in raw_output_dir and have postprocess put them in the run's dir
# TODO we could also drop the data for viz.plot...
# if we just drop the data this should be moved to viz.historical_plots or something
out_dir = pathlib.Path(bucky_cfg["output_dir"]) / "_historical_fit_plots"
out_dir.mkdir(parents=True, exist_ok=True)
out_dir.touch(exist_ok=True) # update mtime
diff_cases = xp.diff(g_data.sum_adm1(cum_case_hist), axis=1)
diff_deaths = xp.diff(g_data.sum_adm1(cum_death_hist), axis=1)
fips_map = us.states.mapping("fips", "abbr")
non_state_ind = xp.all(g_data.sum_adm1(cum_case_hist) < 1, axis=1)
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(15, 10))
x = xp.arange(cum_case_hist.shape[1])
# TODO move the sum_adm1 calls out here, its doing that reduction ALOT
for i in tqdm.tqdm(range(g_data.max_adm1 + 1), desc="Ploting fits", dynamic_ncols=True):
if non_state_ind[i]:
continue
fips_str = str(i).zfill(2)
if fips_str in fips_map:
name = fips_map[fips_str] + " (" + fips_str + ")"
else:
name = fips_str
# fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(15, 10))
ax = fig.subplots(nrows=2, ncols=4)
ax[0, 0].plot(xp.to_cpu(g_data.sum_adm1(cum_case_hist)[i]), label="Cumulative Cases")
ax[0, 0].plot(xp.to_cpu(g_data.sum_adm1(spline_cum_cases)[i]), label="Fit")
ax[0, 0].fill_between(
xp.to_cpu(x),
xp.to_cpu(xp.min(adm1_case_hist[i])),
xp.to_cpu(xp.max(adm1_case_hist[i])),
where=xp.to_cpu(~valid_adm1_case_mask[i]),
color="grey",
alpha=0.2,
)
ax[1, 0].plot(xp.to_cpu(g_data.sum_adm1(cum_death_hist)[i]), label="Cumulative Deaths")
ax[1, 0].plot(xp.to_cpu(g_data.sum_adm1(spline_cum_deaths)[i]), label="Fit")
ax[1, 0].fill_between(
xp.to_cpu(x),
xp.to_cpu(xp.min(adm1_death_hist[i])),
xp.to_cpu(xp.max(adm1_death_hist[i])),
where=xp.to_cpu(~valid_adm1_death_mask[i]),
color="grey",
alpha=0.2,
)
ax[0, 1].plot(xp.to_cpu(diff_cases[i]), label="Incident Cases")
ax[0, 1].plot(xp.to_cpu(g_data.sum_adm1(spline_inc_cases)[i]), label="Fit")
ax[0, 1].fill_between(
xp.to_cpu(x),
xp.to_cpu(xp.min(diff_cases[i])),
xp.to_cpu(xp.max(diff_cases[i])),
where=xp.to_cpu(~valid_adm1_case_mask[i]),
color="grey",
alpha=0.2,
)
ax[0, 2].plot(xp.to_cpu(diff_deaths[i]), label="Incident Deaths")
ax[0, 2].plot(xp.to_cpu(g_data.sum_adm1(spline_inc_deaths)[i]), label="Fit")
ax[0, 2].fill_between(
xp.to_cpu(x),
xp.to_cpu(xp.min(diff_deaths[i])),
xp.to_cpu(xp.max(diff_deaths[i])),
where=xp.to_cpu(~valid_adm1_death_mask[i]),
color="grey",
alpha=0.2,
)
ax[1, 1].plot(xp.to_cpu(xp.log1p(diff_cases[i])), label="Log(Incident Cases)")
ax[1, 1].plot(xp.to_cpu(xp.log1p(g_data.sum_adm1(spline_inc_cases)[i])), label="Fit")
ax[1, 2].plot(xp.to_cpu(xp.log1p(diff_deaths[i])), label="Log(Incident Deaths)")
ax[1, 2].plot(xp.to_cpu(xp.log1p(g_data.sum_adm1(spline_inc_deaths)[i])), label="Fit")
ax[0, 3].plot(xp.to_cpu(inc_hosp[i]), label="Incident Hosp")
ax[0, 3].plot(xp.to_cpu(spline_inc_hosp[i]), label="Fit")
ax[1, 3].plot(xp.to_cpu(xp.log1p(inc_hosp[i])), label="Log(Incident Hosp)")
ax[1, 3].plot(xp.to_cpu(xp.log1p(spline_inc_hosp[i])), label="Fit")
log_cases = xp.to_cpu(xp.log1p(xp.clip(diff_cases[i], a_min=0.0, a_max=None)))
log_deaths = xp.to_cpu(xp.log1p(xp.clip(diff_deaths[i], a_min=0.0, a_max=None)))
if xp.any(xp.array(log_cases > 0)):
ax[1, 1].set_ylim([0.9 * xp.min(log_cases[log_cases > 0]), 1.1 * xp.max(log_cases)])
if xp.any(xp.array(log_deaths > 0)):
ax[1, 2].set_ylim([0.9 * xp.min(log_deaths[log_deaths > 0]), 1.1 * xp.max(log_deaths)])
ax[0, 0].legend()
ax[1, 0].legend()
ax[0, 1].legend()
ax[1, 1].legend()
ax[0, 2].legend()
ax[1, 2].legend()
ax[0, 3].legend()
ax[1, 3].legend()
fig.suptitle(name)
fig.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(out_dir / (name + ".png"))
fig.clf()
plt.close(fig)
plt.close("all")
spline_inc_hosp_adm2 = (
spline_inc_hosp[g_data.adm1_id] * (g_data.Nj / g_data.adm1_Nj[g_data.adm1_id])[:, None]
)
df = {
"cum_cases_fitted": spline_cum_cases,
"cum_deaths_fitted": spline_cum_deaths,
"inc_cases_fitted": spline_inc_cases,
"inc_deaths_fitted": spline_inc_deaths,
"inc_hosp_fitted": spline_inc_hosp_adm2,
}
df["adm2"] = xp.broadcast_to(g_data.adm2_id[:, None], spline_cum_cases.shape)
df["adm1"] = xp.broadcast_to(g_data.adm1_id[:, None], spline_cum_cases.shape)
dates = [str(start_date + datetime.timedelta(days=int(i))) for i in np.arange(-n_hist + 1, 1)]
df["date"] = np.broadcast_to(np.array(dates)[None, :], spline_cum_cases.shape)
for k in df:
df[k] = xp.ravel(xp.to_cpu(df[k]))
# TODO sort columns
df = | pd.DataFrame(df) | pandas.DataFrame |
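# Illustrative sketch (not from the module): the robust reweighting step inside
# the fitting loop above -- residuals beyond roughly six "sigma" get weight 0,
# small residuals keep weight near 1 (a Tukey-biweight-style scheme); plain
# numpy stands in for the xp array module and the residuals are invented.
import numpy as np
resid = np.array([0.1, -0.2, 0.05, 8.0])  # one gross outlier
stddev = np.quantile(np.abs(resid), 0.682)
clean_resid = np.clip(resid / (6.0 * stddev + 1e-8), -1.0, 1.0)
robust_weights = np.clip(1.0 - clean_resid**2.0, 0.0, 1.0) ** 2.0
print(robust_weights)  # outlier weight ~0, the others close to 1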
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import typing
import logging
from math import log
from frozendict import FrozenOrderedDict
import numpy as np
from scipy.sparse import issparse
from scipy.special import digamma, gamma
import pandas as pd # type: ignore
from d3m import container, utils as d3m_utils
from d3m import exceptions
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
from distil.utils import CYTHON_DEP
from distil.primitives.enrich_dates import EnrichDatesPrimitive
from sklearn.feature_selection import mutual_info_regression, mutual_info_classif
from sklearn import metrics
from sklearn import preprocessing
from sklearn import utils as skl_utils
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.utils import random
import version
__all__ = ("MIRankingPrimitive",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
target_col_index = hyperparams.Hyperparameter[typing.Optional[int]](
default=None,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Index of target feature to rank against.",
)
k = hyperparams.Hyperparameter[typing.Optional[int]](
default=3,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Number of clusters for k-nearest neighbors",
)
return_as_metadata = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If True will return each columns rank in their respective metadata as the key 'rank'",
)
sub_sample = hyperparams.Hyperparameter[bool](
default=False,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="Whether or not to run MI ranking on a subset of the dataset",
)
sub_sample_size = hyperparams.Hyperparameter[typing.Optional[int]](
default=1000,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/ControlParameter"
],
description="If sub-sampling, the size of the subsample",
)
class MIRankingPrimitive(
transformer.TransformerPrimitiveBase[
container.DataFrame, container.DataFrame, Hyperparams
]
):
"""
Feature ranking based on mutual information between features and a selected
target. Will rank any feature column with a semantic type of Float, Boolean,
Integer or Categorical, and a corresponding structural type of int, float or str.
Features that could not be ranked are excluded from the returned set.
Parameters
----------
inputs : A container.Dataframe with columns containing numeric or string data.
Returns
-------
output : A DataFrame containing (col_idx, col_name, score) tuples for each ranked feature.
"""
# allowable target column types
_discrete_types = (
"http://schema.org/Boolean",
"http://schema.org/Integer",
"https://metadata.datadrivendiscovery.org/types/CategoricalData",
)
_continous_types = ("http://schema.org/Float",)
_text_semantic = ("http://schema.org/Text",)
_roles = (
"https://metadata.datadrivendiscovery.org/types/Attribute",
"https://metadata.datadrivendiscovery.org/types/Target",
"https://metadata.datadrivendiscovery.org/types/TrueTarget",
"https://metadata.datadrivendiscovery.org/types/SuggestedTarget",
)
_structural_types = set((int, float))
_semantic_types = set(_discrete_types).union(_continous_types)
_random_seed = 100
__author__ = ("Un<NAME>",)
metadata = metadata_base.PrimitiveMetadata(
{
"id": "a31b0c26-cca8-4d54-95b9-886e23df8886",
"version": version.__version__,
"name": "Mutual Information Feature Ranking",
"python_path": "d3m.primitives.feature_selection.mutual_info_classif.DistilMIRanking",
"keywords": ["vector", "columns", "dataframe"],
"source": {
"name": "Distil",
"contact": "mailto:<EMAIL>",
"uris": [
"https://github.com/uncharted-distil/distil-primitives-contrib/blob/main/distil_primitives_contrib/mi_ranking.py",
"https://github.com/uncharted-distil/distil-primitives-contrib/",
],
},
"installation": [
CYTHON_DEP,
{
"type": metadata_base.PrimitiveInstallationType.PIP,
"package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
git_commit=d3m_utils.current_git_commit(
os.path.dirname(__file__)
),
),
},
],
"algorithm_types": [
metadata_base.PrimitiveAlgorithmType.MUTUAL_INFORMATION,
],
"primitive_family": metadata_base.PrimitiveFamily.FEATURE_SELECTION,
}
)
@classmethod
def _can_use_column(
cls,
inputs_metadata: metadata_base.DataMetadata,
column_index: typing.Optional[int],
) -> bool:
column_metadata = inputs_metadata.query(
(metadata_base.ALL_ELEMENTS, column_index)
)
valid_struct_type = (
column_metadata.get("structural_type", None) in cls._structural_types
)
semantic_types = column_metadata.get("semantic_types", [])
valid_semantic_type = (
len(set(cls._semantic_types).intersection(semantic_types)) > 0
)
valid_role_type = len(set(cls._roles).intersection(semantic_types)) > 0
return valid_struct_type and valid_semantic_type
@classmethod
def _append_rank_info(
cls,
inputs: container.DataFrame,
result: typing.List[typing.Tuple[int, str, float]],
rank_np: np.array,
rank_df: pd.DataFrame,
) -> typing.List[typing.Tuple[int, str, float]]:
for i, rank in enumerate(rank_np):
col_name = rank_df.columns.values[i]
result.append((inputs.columns.get_loc(col_name), col_name, rank))
return result
def produce(
self,
*,
inputs: container.DataFrame,
timeout: float = None,
iterations: int = None
) -> base.CallResult[container.DataFrame]:
cols = ["idx", "name", "rank"]
# Make sure the target column is of a valid type and return no ranked features if it isn't.
target_idx = self.hyperparams["target_col_index"]
if not self._can_use_column(inputs.metadata, target_idx):
return base.CallResult(container.DataFrame(data={}, columns=cols))
# check if target is discrete or continuous
semantic_types = inputs.metadata.query_column(target_idx)["semantic_types"]
discrete = len(set(semantic_types).intersection(self._discrete_types)) > 0
# make a copy of the inputs and clean out any missing data
feature_df = inputs.copy()
if self.hyperparams["sub_sample"]:
sub_sample_size = (
self.hyperparams["sub_sample_size"]
if self.hyperparams["sub_sample_size"] < inputs.shape[0]
else inputs.shape[0]
)
rows = random.sample_without_replacement(inputs.shape[0], sub_sample_size)
feature_df = feature_df.iloc[rows, :]
# makes sure that if an entire column is NA, we remove that column, so as to not remove ALL rows
cols_to_drop = feature_df.columns[
feature_df.isna().sum() == feature_df.shape[0]
]
feature_df.drop(columns=cols_to_drop, inplace=True)
feature_df.dropna(inplace=True)
# split out the target feature
target_df = feature_df.iloc[
:, feature_df.columns.get_loc(inputs.columns[target_idx])
]
# drop features that are not compatible with ranking
feature_indices = set(
inputs.metadata.list_columns_with_semantic_types(self._semantic_types)
)
role_indices = set(
inputs.metadata.list_columns_with_semantic_types(self._roles)
)
feature_indices = feature_indices.intersection(role_indices)
feature_indices.remove(target_idx)
for categ_ind in inputs.metadata.list_columns_with_semantic_types(
("https://metadata.datadrivendiscovery.org/types/CategoricalData",)
):
if categ_ind in feature_indices:
if (
np.unique(inputs[inputs.columns[categ_ind]]).shape[0]
== inputs.shape[0]
):
feature_indices.remove(categ_ind)
elif (
inputs.metadata.query((metadata_base.ALL_ELEMENTS, categ_ind))[
"structural_type"
]
== str
):
feature_df[inputs.columns[categ_ind]] = pd.to_numeric(
feature_df[inputs.columns[categ_ind]]
)
text_indices = inputs.metadata.list_columns_with_semantic_types(
self._text_semantic
)
tfv = TfidfVectorizer(max_features=20)
column_to_text_features = {}
text_feature_indices = []
for text_index in text_indices:
if (
text_index not in feature_indices
and text_index in role_indices
and text_index != target_idx
):
word_features = tfv.fit_transform(
feature_df[inputs.columns[text_index]]
)
if issparse(word_features):
column_to_text_features[
inputs.columns[text_index]
] = | pd.DataFrame.sparse.from_spmatrix(word_features) | pandas.DataFrame.sparse.from_spmatrix |
# Import libraries
import os
import sys
import anemoi as an
import pandas as pd
import numpy as np
import pyodbc
from datetime import datetime
import requests
import collections
import json
import urllib3
def return_between_date_query_string(start_date, end_date):
if start_date is not None and end_date is not None:
start_end_str = '''AND [TimeStampLocal] >= '%s' AND [TimeStampLocal] < '%s' ''' %(start_date, end_date)
elif start_date is not None and end_date is None:
start_end_str = '''AND [TimeStampLocal] >= '%s' ''' %(start_date)
elif start_date is None and end_date is not None:
start_end_str = '''AND [TimeStampLocal] < '%s' ''' %(end_date)
else:
start_end_str = ''
return start_end_str
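# Illustration (not part of the original module) of the three filter shapes this helper
# returns; the dates are arbitrary examples.
_between_demo = return_between_date_query_string('2020-01-01', '2020-02-01')
# -> "AND [TimeStampLocal] >= '2020-01-01' AND [TimeStampLocal] < '2020-02-01' "
_open_start_demo = return_between_date_query_string('2020-01-01', None)
# -> "AND [TimeStampLocal] >= '2020-01-01' "
_no_filter_demo = return_between_date_query_string(None, None)
# -> ""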
def sql_or_string_from_mvs_ids(mvs_ids):
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
return or_string
def sql_list_from_mvs_ids(mvs_ids):
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
mvs_ids_list = ','.join([f"({mvs_id}_1)" for mvs_id in mvs_ids])
return mvs_ids_list
def rename_mvs_id_column(col, names, types):
name = names[int(col.split('_')[0])]
data_type = types[col.split('_')[1]]
return f'{name}_{data_type}'
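# Illustrative call with hypothetical id/label maps: a raw '<mvs_id>_<type code>' column name
# is split and mapped to '<sensor name>_<data type>'.
_demo_names = {12345: 'MastA_Spd_80m'}
_demo_types = {'1': 'AVG'}
_demo_renamed = rename_mvs_id_column('12345_1', _demo_names, _demo_types)  # -> 'MastA_Spd_80m_AVG'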
# Define DataBase class
class M2D2(object):
'''Class to connect to RAG M2D2 PRD database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is::
import anemoi as an
m2d2 = an.io.database.M2D2()
:Parameters:
:Returns:
out: an.M2D2 object connected to M2D2
'''
self.database = 'M2D2'
server = '10.1.15.53' # PRD
#server = 'SDHQRAGDBDEV01\RAGSQLDBSTG' #STG
db = 'M2D2_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except pyodbc.Error:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def connection_check(self, database):
return self.database == database
def masts(self):
'''
:Returns:
out: DataFrame of all met masts with measured data in M2D2
Example::
import anemoi as an
m2d2 = an.io.database.M2D2()
m2d2.masts()
'''
if not self.connection_check('M2D2'):
raise ValueError('Need to connect to M2D2 to retrieve met masts. Use anemoi.DataBase(database="M2D2")')
sql_query_masts = '''
SELECT [Project]
,[AssetID]
,[wmm_id]
,[mvs_id]
,[Name]
,[Type]
,[StartDate]
,[StopDate]
FROM [M2D2_DB_BE].[dbo].[ViewProjectAssetSensors] WITH (NOLOCK)
'''
sql_query_coordinates='''
SELECT [wmm_id]
,[WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]'''
masts = pd.read_sql(sql_query_masts, self.conn, parse_dates=['StartDate', 'StopDate'])
coordinates = pd.read_sql(sql_query_coordinates, self.conn)
masts = masts.merge(coordinates, left_on='wmm_id', right_on='wmm_id')
masts.set_index(['Project', 'wmm_id', 'WMM_Latitude', 'WMM_Longitude', 'Type'], inplace=True)
masts.sort_index(inplace=True)
return masts
def mvs_ids(self):
masts = self.masts()
mvs_ids = masts.mvs_id.values.tolist()
return mvs_ids
def valid_signal_labels(self):
signal_type_query = '''
SELECT [MDVT_ID]
,[MDVT_Name]
FROM [M2D2_DB_BE].[dbo].[MDataValueType]'''
signal_types = pd.read_sql(signal_type_query, self.conn, index_col='MDVT_Name').MDVT_ID
return signal_types
def column_labels_for_masts(self):
masts = self.masts()
mvs_ids = masts.mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def column_labels_for_data_from_mvs_ids(self, data):
masts = self.masts()
names_map = pd.Series(index=masts.mvs_id.values, data=masts.Name.values).to_dict()
types = self.valid_signal_labels()
types.loc['FLAG'] = 'Flag'
types_map = pd.Series(index=types.values.astype(str), data=types.index.values).to_dict()
data = data.rename(lambda x: rename_mvs_id_column(x, names=names_map, types=types_map), axis=1)
return data
def column_labels_for_wmm_id(self, wmm_id):
masts = self.masts()
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.unique().tolist()
or_string = ' OR '.join(['mvs_id = {}'.format(mvs_id) for mvs_id in mvs_ids])
column_label_sql_query = '''
SELECT [column_id]
,[label]
FROM [M2D2_DB_BE].[dbo].[ViewWindogMetaData]
WITH (NOLOCK)
WHERE {}'''.format(or_string)
column_labels = pd.read_sql(column_label_sql_query, self.conn)
column_labels = column_labels.set_index('column_id')
return column_labels
def data_from_sensors_mvs_ids(self, mvs_ids, signal_type='AVG'):
'''Download sensor data from M2D2
:Parameters:
mvs_ids: int or list
Virtual sensor IDs (mvs_ids) in M2D2, can be singular
signal_type: str, default 'AVG' - NOT SUPPORTED AT THIS TIME
Signal type for download
For example: 'AVG', 'SD', 'MIN', 'MAX', 'GUST'
:Returns:
out: DataFrame with signal data from virtual sensor
'''
if not isinstance(mvs_ids, list):
mvs_ids = [mvs_ids]
valid_mvs_ids = self.mvs_ids()
assert all([mvs_id in valid_mvs_ids for mvs_id in mvs_ids]), f'One of the following is not a valid mvs_id: {mvs_ids}'
mvs_ids_list = sql_list_from_mvs_ids(mvs_ids)
sql_query= f"""
SET NOCOUNT ON
DECLARE @ColumnListID NVARCHAR(4000)
,@startDate DATETIME2
,@endDate DATETIME2
SET @ColumnListID= '{mvs_ids_list}'
SET @startDate = NULL
SET @endDate = NULL
EXECUTE [dbo].[proc_DataExport_GetDataByColumnList]
@ColumnListID
,@startDate
,@endDate
"""
data = pd.read_sql(sql_query, self.conn, index_col='CorrectedTimestamp')
data.index.name = 'stamp'
data.columns.name = 'sensor'
data = self.column_labels_for_data_from_mvs_ids(data)
return data
def data_from_mast_wmm_id(self, wmm_id):
'''Download data from all sensors on a mast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with signal data from each virtual sensor on the mast
'''
masts = self.masts()
wmm_ids = masts.index.get_level_values('wmm_id').sort_values().unique().tolist()
assert wmm_id in wmm_ids, f'the following is not a valid wmm_id: {wmm_id}'
mvs_ids = masts.loc[pd.IndexSlice[:,wmm_id],:].mvs_id.values.tolist()
data = self.data_from_sensors_mvs_ids(mvs_ids)
return data
def metadata_from_mast_wmm_id(self, wmm_id):
'''Download mast metadata from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: DataFrame with mast metadata
'''
sql_query= '''
SELECT [WMM_Latitude]
,[WMM_Longitude]
,[WMM_Elevation]
FROM [M2D2_DB_BE].[dbo].[ViewWindDataSet]
WHERE wmm_id = {}
'''.format(wmm_id)
mast_metadata = pd.read_sql(sql_query, self.conn)
return mast_metadata
def mast_from_wmm_id(self, wmm_id):
'''Download an.MetMast from M2D2
:Parameters:
wmm_id: int
Mast ID (wmm_id) in M2D2
:Returns:
out: an.MetMast with data and metadata from M2D2
'''
print(f'Downloading Mast {wmm_id} from M2D2')
data = self.data_from_mast_wmm_id(wmm_id=wmm_id)
metadata = self.metadata_from_mast_wmm_id(wmm_id=wmm_id)
mast = an.MetMast(data=data,
name=wmm_id,
lat=metadata.WMM_Latitude[0],
lon=metadata.WMM_Longitude[0],
elev=metadata.WMM_Elevation[0])
return mast
def masts_from_project(self, project):
'''Download an.MetMasts from M2D2 for a given project
:Parameters:
project_name: str
Project name in M2D2
:Returns:
out: List of an.MetMasts with data and metadata from M2D2 for a given project
'''
masts = self.masts()
projects = masts.index.get_level_values('Project').unique().tolist()
assert project in projects, f'Project {project} not found in M2D2'
wmm_ids = masts.loc[project,:].index.get_level_values('wmm_id').sort_values().unique().tolist()
masts = [self.mast_from_wmm_id(wmm_id) for wmm_id in wmm_ids]
return masts
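# Sketch of the intended workflow (shown as comments because it needs the VPN/database access
# described above; the project name and wmm_id are placeholders):
#   m2d2 = M2D2()                                             # same as an.io.database.M2D2()
#   project_masts = m2d2.masts_from_project('SomeProject')    # list of an.MetMast objects
#   single_mast = m2d2.mast_from_wmm_id(1234)                 # or fetch one mast by its wmm_id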
# Define Turbine class
class Turbine(object):
'''Class to connect to EDF Wind Turbine database
'''
def __init__(self):
'''Data structure for connecting to and downloading data from M2D2. Convention is:
import anemoi as an
turb_db = an.io.database.Turbine()
:Parameters:
:Returns:
out: an.Turbine object connected to Turbine database
'''
self.database = 'Turbine'
server = '10.1.15.53'
db = 'Turbine_DB_BE'
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str #Assign connection string
try:
self.conn = pyodbc.connect(self.conn_str) #Apply connection string to connect to database
except pyodbc.Error:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def metadata(self):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_turbines = '''
SELECT [TUR_Manufacturer]
,[TUR_RatedOutputkW]
,[TPC_MaxOutput]
,[TUR_RotorDia]
,[TUR_Model]
,[AllHubHeights]
,[TPC_DocumentDate]
,[TUR_ID]
,[IECClass]
,[TPG_ID]
,[TPG_Name]
,[TPC_ID]
,[TVR_VersionName]
,[TPC_dbalevel]
,[TPC_TIScenario]
,[TPC_BinType]
,[TTC_ID]
,[TRPMC_ID]
,[P_ID]
,[P_Name]
FROM [Turbine_DB_BE].[NodeEstimate].[AllPowerCurves]
WHERE TPC_Type = 'Manufacturer General Spec'
'''
turbines = pd.read_sql(sql_query_turbines, self.conn)
return turbines
def power_curve_from_tpc_id(self, tpc_id):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_thrust_curve = '''
SELECT TPCD_AirDensity,
TPCD_WindSpeedBin,
TPCD_OutputKW
FROM TPCDETAILS
WHERE TPC_id = {} AND TPCD_IsDeleted = 0;
'''.format(tpc_id)
thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
return thrust_curve
def trust_curve_from_ttc_id(self, ttc_id):
'''Get turbine model metadata'''
assert self.is_connected('Turbine'), 'Trying to query the Turbine DB without being connected.'
sql_query_thrust_curve = '''
SELECT TTCD_AirDensity,
TTCD_WindSpeedBin,
TTCD_ThrustValue
FROM TTCDETAILS
WHERE TTC_id = {} AND TTCD_IsDeleted = 0;
'''.format(ttc_id)
thrust_curve = pd.read_sql(sql_query_thrust_curve, self.conn)
return thrust_curve
# Define Padre class
class Padre(object):
'''Class to connect to PRE Padre database
'''
def __init__(self, database='PADREScada', conn_str=None, conn=None, domino=False):
'''Data structure with both database name and connection string.
:Parameters:
database: string, default None
Name of the padre database to connect to
conn_str: string, default None
SQL connection string needed to connect to the database
conn: object, default None
SQL connection object to database
'''
self.database = database
if self.database == 'PADREScada':
server = '10.1.106.44'
db = 'PADREScada'
elif self.database == 'PadrePI':
server = '10.1.106.44'
db = 'PADREScada'  # NOTE: currently identical to the PADREScada branch above
conn_str = 'DRIVER={SQL Server}; SERVER=%s; DATABASE=%s; Trusted_Connection=yes' %(server, db)
self.conn_str = conn_str
try:
self.conn = pyodbc.connect(self.conn_str)
except pyodbc.Error:
print('Database connection error: you either don\'t have permission to the database or aren\'t signed onto the VPN')
def is_connected(self, database):
return self.database == database
def assets(self, project=None, turbines_only=False):
'''Returns:
DataFrame of all turbines within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
sql_query_assets = '''
SELECT [AssetKey]
,Projects.[ProjectName]
,[AssetType]
,[AssetName]
,Turbines.[Latitude]
,Turbines.[Longitude]
,[elevation_mt]
FROM [PADREScada].[dbo].[Asset] as Turbines
WITH (NOLOCK)
INNER JOIN [PADREScada].[dbo].[Project] as Projects on Turbines.ProjectKey = Projects.ProjectKey
'''
assets = pd.read_sql(sql_query_assets, self.conn)
assets.set_index(['ProjectName', 'AssetName'], inplace=True)
assets.sort_index(axis=0, inplace=True)
if turbines_only:
assets = assets.loc[assets.AssetType == 'Turbine', :]
assets.drop('AssetType', axis=1, inplace=True)
if project is not None:
assets = assets.loc[project, :]
return assets
def operational_projects(self):
'''Returns:
List of all projects within Padre
'''
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve projects. Use anemoi.DataBase(database="Padre")')
padre_project_query = """
SELECT [ProjectKey]
,[ProjectName]
,[State]
,[NamePlateCapacity]
,[NumGenerators]
,[latitude]
,[longitude]
,[DateCOD]
FROM [PADREScada].[dbo].[Project]
WHERE technology = 'Wind'"""
projects = pd.read_sql(padre_project_query, self.conn)
projects.set_index('ProjectName', inplace=True)
return projects
def turbine_categorizations(self, category_type='EDF'):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve turbines. Use anemoi.DataBase(database="Padre")')
padre_category_query = """
SELECT [CategoryKey]
,[StringName]
FROM [PADREScada].[dbo].[Categories]
WHERE CategoryType = '%s'""" %category_type
categories = pd.read_sql(padre_category_query, self.conn)
categories.set_index('CategoryKey', inplace=True)
return categories
def QCd_turbine_data(self, asset_key):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT [TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Ambient_Temperature]
,[IEC Category]
,[EDF Category]
,[Expected Power (kW)]
,[Expected Energy (kWh)]
,[EnergyDelta (kWh)]
,[EnergyDelta (MWh)]
FROM [PADREScada].[dbo].[vw_10mDataBI]
WITH (NOLOCK)
WHERE [assetkey] = %i''' %asset_key
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_data(self, asset_key, start_date=None, end_date=None):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Average_Nacelle_Wdspd]
,[Average_Active_Power]
,[Average_Nacelle_Direction]
,[Average_Blade_Pitch]
,[Minimum_Blade_Pitch]
,[Maximum_Blade_Pitch]
,[Average_Rotor_Speed]
,[Minimum_Rotor_Speed]
,[Maximum_Rotor_Speed]
,[Average_Ambient_Temperature]
,coalesce([IECStringKey_Manual]
,[IECStringKey_FF]
,[IECStringKey_Default]) IECKey
,coalesce([EDFStringKey_Manual]
,[EDFStringKey_FF]
,[EDFStringKey_Default]) EDFKey
,coalesce([State_and_Fault_Manual]
,[State_and_Fault_FF]
,[State_and_Fault]) State_and_Fault
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {} {}'''.format(asset_key, return_between_date_query_string(start_date, end_date))
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def raw_turbine_expected_energy(self, asset_key):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
turbine_data_query = '''
SELECT
[TimeStampLocal]
,[Expected_Power_NTF]
,[Expected_Energy_NTF]
,[Expected_Power_RefMet]
,[Expected_Energy_RefMet]
,[Expected_Power_Uncorr]
,[Expected_Energy_Uncorr]
,[Expected_Power_DensCorr]
,[Expected_Energy_DensCorr]
,[Expected_Power_AvgMet]
,[Expected_Energy_AvgMet]
,[Expected_Power_ProxyWTGs]
,[Expected_Energy_ProxyWTGs]
,[Expected_Power_MPC]
,[Expected_Energy_MPC]
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [assetkey] = {}'''.format(asset_key)
turbine_data = pd.read_sql(turbine_data_query, self.conn)
turbine_data['TimeStampLocal'] = pd.to_datetime(turbine_data['TimeStampLocal'], format='%Y-%m-%d %H:%M:%S')
turbine_data.set_index('TimeStampLocal', inplace=True)
turbine_data.sort_index(axis=0, inplace=True)
turbine_data = turbine_data.groupby(turbine_data.index).first()
return turbine_data
def senvion_event_logs(self, project_id):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
sql_query = '''
SELECT [assetkey]
,[TimeStamp]
,[statuscode]
,[incomingphasingoutreset]
FROM [PADREScada].[dbo].[SenvionEventLog]
WHERE projectkey = {} and incomingphasingoutreset != 'Reset'
ORDER BY assetkey, TimeStamp
'''.format(project_id)
event_log = pd.read_sql(sql_query, self.conn)
return event_log
def ten_min_energy_by_status_code(self, project_id, start_date, end_date, padre_NTF=True):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
if padre_NTF:
padre_power_col = 'Expected_Power_NTF'
else:
padre_power_col = 'Expected_Power_DensCorr'
padre_project_query = '''
SELECT [TimeStampLocal]
,[AssetKey]
,[Average_Active_Power]
,[{}]
FROM [PADREScada].[dbo].[WTGCalcData10m]
WITH (NOLOCK)
WHERE [projectkey] = {} {}
ORDER BY TimeStampLocal, AssetKey'''.format(padre_power_col, project_id, return_between_date_query_string(start_date, end_date))
data_ten_min = pd.read_sql(padre_project_query, self.conn).set_index(['TimeStampLocal', 'AssetKey'])
data_ten_min.columns = ['power_active','power_expected']
data_ten_min = data_ten_min.groupby(data_ten_min.index).first()
data_ten_min.index = pd.MultiIndex.from_tuples(data_ten_min.index)
data_ten_min.index.names = ['Stamp', 'AssetKey']
return data_ten_min
def senvion_ten_min_energy_by_status_code(self, project_id, status_codes=[6680.0, 6690.0, 6697.0, 15000.0]):
if not self.is_connected('PADREScada'):
raise ValueError('Need to connect to Padre to retrieve met masts. Use anemoi.DataBase(database="Padre")')
projects = self.operational_projects()
project = projects.loc[projects.ProjectKey == project_id].index.values[0]
if project in ['<NAME>','<NAME>','St. <NAME>']:
padre_NTF = False
else:
padre_NTF = True
event_log = self.senvion_event_logs(project_id=project_id)
event_log_icing = event_log.loc[event_log.statuscode.isin(status_codes), :]
incoming = event_log_icing.loc[event_log_icing.incomingphasingoutreset == 'incoming', ['assetkey', 'statuscode', 'TimeStamp']].reset_index(drop=True)
outgoing = event_log_icing.loc[event_log_icing.incomingphasingoutreset == 'phasing out', 'TimeStamp'].reset_index(drop=True)
status = | pd.concat([incoming, outgoing], axis=1) | pandas.concat |
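# Toy illustration (synthetic values, not part of the original class) of the column-wise pairing
# used just above: because both objects share the same default RangeIndex, pd.concat(..., axis=1)
# lines up each 'incoming' event row with its 'phasing out' timestamp on the same output row.
_incoming_demo = pd.DataFrame({'assetkey': [101, 101], 'statuscode': [6680.0, 6680.0],
'TimeStamp': pd.to_datetime(['2020-01-01 00:10', '2020-01-01 03:20'])})
_outgoing_demo = pd.Series(pd.to_datetime(['2020-01-01 00:50', '2020-01-01 04:00']), name='TimeStamp')
_paired_demo = pd.concat([_incoming_demo, _outgoing_demo], axis=1)  # start and end of each icing event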
import logging
import numpy as np
import copy
import pandas as pd
from juneau.utils.utils import sigmoid, jaccard_similarity
from juneau.search.search_prov_code import ProvenanceSearch
class Sorted_State:
def __init__(self, query, tables):
self.name = query.name # the query name
self.tables = tables # a list of table names
def save_a_state(self, state, previous_state, case_id):
if previous_state == None:
self.state = state
return
if case_id == 0:
domains = ["col_sim_ub", "new_row_ub", "prov_sim"]
elif case_id == 1:
domains = ["row_sim_ub", "new_col_ub", "prov_sim"]
elif case_id == 2:
domains = ["col_sim_ub", "row_sim_ub", "nan_diff_ub", "prov_sim"]
for domain in domains:
state[domain] = previous_state[domain].append(state[domain])
self.state = state # a dictionary of feature:dataframe
class Sorted_Components:
def __init__(self, mappings, all_tables, all_graphs, previous_state = None):
self.tables = all_tables
self.comp_tables = []
self.cache_tables = []
self.Graphs = all_graphs
self.mappings = mappings
self.pre_state = previous_state
if self.pre_state != None:
for tn in self.tables.keys():
if tn in self.pre_state.tables:
self.cache_tables.append(tn)
else:
self.comp_tables.append(tn)
else:
self.comp_tables = list(self.tables.keys())
def provenance_score(self, query, alpha):
prov_class = ProvenanceSearch(self.Graphs)
# Compute Provenance Similarity
logging.info("Compute Provenance Similarity!")
table_prov_rank = prov_class.search_score_rank(query.node, self.comp_tables)
table_prov_score = {}
for i, j in table_prov_rank:
table_prov_score["rtable" + i] = j
for i in self.cache_tables:
table_prov_score[i] = self.pre_state.state["prov_sim"]["score"][i]
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
continue
if i not in table_prov_score:
prov_score = 0
else:
prov_score = 1 - sigmoid(table_prov_score[i])
tname = i[6:]
if tname not in self.mappings:
inital_mapping = {}
else:
inital_mapping = self.mappings[tname]
prov_score = alpha * prov_score
rank_candidate.append((i, prov_score, inital_mapping))
return rank_candidate
def col_similarity_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["col_sim_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
tableB = self.tables[i]
if tname not in self.mappings:
col_sim_ub = 0
else:
col_sim_ub = float(beta) * \
float(min(tableA.shape[1], tableB.shape[1]))\
/float(tableA.shape[1] + tableB.shape[1] - len(self.mappings[tname]))
rank_candidate.append((i, col_sim_ub))
return rank_candidate
def row_similarity_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["row_sim_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
tableB = self.tables[i]
if tname not in self.mappings:
row_sim_ub = 0
else:
row_sim_ub = 0
initial_mapping = self.mappings[tname]
for key in initial_mapping.keys():
Avalue = tableA[key].dropna().keys()
Bvalue = tableB[initial_mapping[key]].dropna().values
try:
row_sim = jaccard_similarity(Avalue, Bvalue)
except:
row_sim = 0
if row_sim > row_sim_ub:
row_sim_ub = row_sim
rank_candidate.append((i, beta * row_sim_ub))
return rank_candidate
def new_col_rate_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
tname = i[6:]
tableA = query.value
if tname not in self.mappings:
inital_mapping = {}
new_data_rate = 1
else:
inital_mapping = self.mappings[tname]
new_data_rate = float(tableA.shape[1] - len(inital_mapping))/float(tableA.shape[1])
new_data_rate_ub = float(beta) * new_data_rate
rank_candidate.append((i, new_data_rate_ub))
return rank_candidate
def new_row_rate_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["new_row_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
tableB = self.tables[i]
if tname not in self.mappings:
new_data_rate = 0
else:
new_data_rate = 0
inital_mapping = self.mappings[tname]
for key in inital_mapping.keys():
Alen = tableA[key].dropna().values
Blen = tableB[inital_mapping[key]].dropna().values
try:
new_data_rate_temp = float(1) - float(len(np.intersect1d(Alen, Blen))) / float(len(Alen))
except:
new_data_rate_temp = 0
if new_data_rate_temp > new_data_rate:
new_data_rate = new_data_rate_temp
rank_candidate.append((i, beta * new_data_rate))
return rank_candidate
def nan_delta_ub(self, query, beta):
rank_candidate = []
for i in self.tables.keys():
if i == query.name:
continue
if i in self.cache_tables:
rank_candidate.append((i, self.pre_state.state["nan_diff_ub"]["score"][i]))
continue
tname = i[6:]
tableA = query.value
if tname not in self.mappings:
nan_ub = 0
else:
key_indexA = list(self.mappings[tname].keys())
ub_zero_diff = tableA[key_indexA].isnull().sum().sum()
value_num = tableA.shape[0] * tableA.shape[1]
nan_ub = float(ub_zero_diff)/float(value_num)
rank_candidate.append((i, beta * nan_ub))
return rank_candidate
def merge_additional_training(self, query, alpha, beta):
ub1 = sorted(self.col_similarity_ub(query, beta), key = lambda d:d[1], reverse=True)
ub2 = sorted(self.new_row_rate_ub(query, 1 - alpha - beta), key = lambda d:d[1], reverse=True)
#print(ub1[:5])
#print(ub2[:5])
ub = ub1[0][1] + ub2[0][1]
rank_candidate = self.provenance_score(query, alpha)
old_rank_candidate = copy.deepcopy(rank_candidate)
rank_candidate = []
for i in range(len(old_rank_candidate)):
rank_candidate.append((old_rank_candidate[i][0], old_rank_candidate[i][1] + ub, old_rank_candidate[i][2]))
u1_df = pd.DataFrame([pair[1] for pair in ub1], index = [pair[0] for pair in ub1], columns = ["score"])
u2_df = pd.DataFrame([pair[1] for pair in ub2], index = [pair[0] for pair in ub2], columns = ["score"])
u3_df = pd.DataFrame([pair[1] for pair in old_rank_candidate], index = [pair[0] for pair in old_rank_candidate], columns = ["score"])
sa_state = Sorted_State(query, list(self.tables.keys()))
sa_state_value = {"col_sim_ub":u1_df, "new_row_ub": u2_df, "prov_sim": u3_df}
sa_state.save_a_state(sa_state_value, self.pre_state, 0)
return rank_candidate, old_rank_candidate, sa_state
def merge_feature_engineering(self, query, alpha, beta):
ub1 = sorted(self.row_similarity_ub(query, beta), key = lambda d:d[1], reverse=True)
ub2 = sorted(self.new_col_rate_ub(query, 1 - alpha - beta), key = lambda d:d[1], reverse=True)
print(ub1[:5])
print(ub2[:5])
ub = ub1[0][1] + ub2[0][1]
rank_candidate = self.provenance_score(query, alpha)
old_rank_candidate = copy.deepcopy(rank_candidate)
rank_candidate = []
for i in range(len(old_rank_candidate)):
rank_candidate.append((old_rank_candidate[i][0], old_rank_candidate[i][1] + ub, old_rank_candidate[i][2]))
uf1_df = pd.DataFrame([pair[1] for pair in ub1], index = [pair[0] for pair in ub1], columns = ["score"])
uf2_df = pd.DataFrame([pair[1] for pair in ub2], index = [pair[0] for pair in ub2], columns = ["score"])
uf3_df = pd.DataFrame([pair[1] for pair in old_rank_candidate], index = [pair[0] for pair in old_rank_candidate], columns = ["score"])
sa_state = Sorted_State(query, list(self.tables.keys()))
sa_state_value = {"row_sim_ub":uf1_df, "new_col_ub":uf2_df, "prov_sim":uf3_df}
sa_state.save_a_state(sa_state_value, self.pre_state, 1)
return rank_candidate, old_rank_candidate, sa_state
def merge_data_cleaning(self, query, alpha, beta, gamma):
ub1 = sorted(self.col_similarity_ub(query, beta), key=lambda d:d[1], reverse=True)
ub2 = sorted(self.row_similarity_ub(query, gamma), key=lambda d:d[1], reverse=True)
ub3 = sorted(self.nan_delta_ub(query, float(1 - alpha - beta - gamma)), key=lambda d:d[1], reverse=True)
ub = ub1[0][1] + ub2[0][1] + ub3[0][1]
rank_candidate = self.provenance_score(query, alpha)
old_rank_candidate = copy.deepcopy(rank_candidate)
rank_candidate = []
for i in range(len(old_rank_candidate)):
rank_candidate.append((old_rank_candidate[i][0], old_rank_candidate[i][1] + ub, old_rank_candidate[i][2]))
uf1_df = | pd.DataFrame([pair[1] for pair in ub1], index = [pair[0] for pair in ub1], columns = ["score"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import random
import numpy as np
import pandas as pd
from pandas.compat import lrange
from pandas.api.types import CategoricalDtype
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range, NaT, IntervalIndex, Categorical)
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameSorting(TestData):
def test_sort_values(self):
frame = DataFrame([[1, 1, 2], [3, 1, 0], [4, 5, 6]],
index=[1, 2, 3], columns=list('ABC'))
# by column (axis=0)
sorted_df = frame.sort_values(by='A')
indexer = frame['A'].argsort().values
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
indexer = indexer[::-1]
expected = frame.loc[frame.index[indexer]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
# GH4839
sorted_df = frame.sort_values(by=['A'], ascending=[False])
assert_frame_equal(sorted_df, expected)
# multiple bys
sorted_df = frame.sort_values(by=['B', 'C'])
expected = frame.loc[[2, 1, 3]]
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=['B', 'C'], ascending=False)
assert_frame_equal(sorted_df, expected[::-1])
sorted_df = frame.sort_values(by=['B', 'A'], ascending=[True, False])
assert_frame_equal(sorted_df, expected)
pytest.raises(ValueError, lambda: frame.sort_values(
by=['A', 'B'], axis=2, inplace=True))
# by row (axis=1): GH 10806
sorted_df = frame.sort_values(by=3, axis=1)
expected = frame
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=3, axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 2], axis='columns')
expected = frame.reindex(columns=['B', 'A', 'C'])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1,
ascending=[True, False])
assert_frame_equal(sorted_df, expected)
sorted_df = frame.sort_values(by=[1, 3], axis=1, ascending=False)
expected = frame.reindex(columns=['C', 'B', 'A'])
assert_frame_equal(sorted_df, expected)
msg = r'Length of ascending \(5\) != length of by \(2\)'
with tm.assert_raises_regex(ValueError, msg):
frame.sort_values(by=['A', 'B'], axis=0, ascending=[True] * 5)
def test_sort_values_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
sorted_df = frame.copy()
sorted_df.sort_values(by='A', inplace=True)
expected = frame.sort_values(by='A')
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=1, axis=1, inplace=True)
expected = frame.sort_values(by=1, axis=1)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by='A', ascending=False, inplace=True)
expected = frame.sort_values(by='A', ascending=False)
assert_frame_equal(sorted_df, expected)
sorted_df = frame.copy()
sorted_df.sort_values(by=['A', 'B'], ascending=False, inplace=True)
expected = frame.sort_values(by=['A', 'B'], ascending=False)
assert_frame_equal(sorted_df, expected)
def test_sort_nan(self):
# GH3917
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# sort one column only
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A'], na_position='first')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A'], na_position='first', ascending=False)
assert_frame_equal(sorted_df, expected)
expected = df.reindex(columns=['B', 'A'])
sorted_df = df.sort_values(by=1, axis=1, na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', order
expected = DataFrame(
{'A': [1, 1, 2, 4, 6, 8, nan],
'B': [2, 9, nan, 5, 5, 4, 5]},
index=[3, 0, 1, 6, 4, 5, 2])
sorted_df = df.sort_values(['A', 'B'])
assert_frame_equal(sorted_df, expected)
# na_position='first', order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 2, 9, nan, 5, 5, 4]},
index=[2, 3, 0, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='first', not order
expected = DataFrame(
{'A': [nan, 1, 1, 2, 4, 6, 8],
'B': [5, 9, 2, nan, 5, 5, 4]},
index=[2, 0, 3, 1, 6, 4, 5])
sorted_df = df.sort_values(['A', 'B'], ascending=[
1, 0], na_position='first')
assert_frame_equal(sorted_df, expected)
# na_position='last', not order
expected = DataFrame(
{'A': [8, 6, 4, 2, 1, 1, nan],
'B': [4, 5, 5, nan, 2, 9, 5]},
index=[5, 4, 6, 1, 3, 0, 2])
sorted_df = df.sort_values(['A', 'B'], ascending=[
0, 1], na_position='last')
assert_frame_equal(sorted_df, expected)
# Test DataFrame with nan label
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
# NaN label, ascending=True, na_position='last'
sorted_df = df.sort_index(
kind='quicksort', ascending=True, na_position='last')
expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]},
index=[1, 2, 3, 4, 5, 6, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=True, na_position='first'
sorted_df = df.sort_index(na_position='first')
expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],
'B': [5, 9, nan, 5, 2, 5, 4]},
index=[nan, 1, 2, 3, 4, 5, 6])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='last'
sorted_df = df.sort_index(kind='quicksort', ascending=False)
expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],
'B': [4, 5, 2, 5, nan, 9, 5]},
index=[6, 5, 4, 3, 2, 1, nan])
assert_frame_equal(sorted_df, expected)
# NaN label, ascending=False, na_position='first'
sorted_df = df.sort_index(
kind='quicksort', ascending=False, na_position='first')
expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],
'B': [5, 4, 5, 2, 5, nan, 9]},
index=[nan, 6, 5, 4, 3, 2, 1])
assert_frame_equal(sorted_df, expected)
def test_stable_descending_sort(self):
# GH #6399
df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],
columns=['sort_col', 'order'])
sorted_df = df.sort_values(by='sort_col', kind='mergesort',
ascending=False)
assert_frame_equal(df, sorted_df)
def test_stable_descending_multicolumn_sort(self):
nan = np.nan
df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],
'B': [9, nan, 5, 2, 5, 4, 5]})
# test stable mergesort
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 2, 9]},
index=[2, 5, 4, 6, 1, 3, 0])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 1],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
expected = DataFrame(
{'A': [nan, 8, 6, 4, 2, 1, 1],
'B': [5, 4, 5, 5, nan, 9, 2]},
index=[2, 5, 4, 6, 1, 0, 3])
sorted_df = df.sort_values(['A', 'B'], ascending=[0, 0],
na_position='first',
kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_stable_categorial(self):
# GH 16793
df = DataFrame({
'x': pd.Categorical(np.repeat([1, 2, 3, 4], 5), ordered=True)
})
expected = df.copy()
sorted_df = df.sort_values('x', kind='mergesort')
assert_frame_equal(sorted_df, expected)
def test_sort_datetimes(self):
# GH 3461, argsort / lexsort differences for a datetime column
df = DataFrame(['a', 'a', 'a', 'b', 'c', 'd', 'e', 'f', 'g'],
columns=['A'],
index=date_range('20130101', periods=9))
dts = [Timestamp(x)
for x in ['2004-02-11', '2004-01-21', '2004-01-26',
'2005-09-20', '2010-10-04', '2009-05-12',
'2008-11-12', '2010-09-28', '2010-09-28']]
df['B'] = dts[::2] + dts[1::2]
df['C'] = 2.
df['A1'] = 3.
df1 = df.sort_values(by='A')
df2 = df.sort_values(by=['A'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['B'])
assert_frame_equal(df1, df2)
df1 = df.sort_values(by='B')
df2 = df.sort_values(by=['C', 'B'])
assert_frame_equal(df1, df2)
def test_frame_column_inplace_sort_exception(self):
s = self.frame['A']
with tm.assert_raises_regex(ValueError, "This Series is a view"):
s.sort_values(inplace=True)
cp = s.copy()
cp.sort_values() # it works!
def test_sort_nat_values_in_int_column(self):
# GH 14922: "sorting with large float and multiple columns incorrect"
# cause was that the int64 value NaT was considered as "na". Which is
# only correct for datetime64 columns.
int_values = (2, int(NaT))
float_values = (2.0, -1.797693e308)
df = DataFrame(dict(int=int_values, float=float_values),
columns=["int", "float"])
df_reversed = DataFrame(dict(int=int_values[::-1],
float=float_values[::-1]),
columns=["int", "float"],
index=[1, 0])
# NaT is not a "na" for int64 columns, so na_position must not
# influence the result:
df_sorted = df.sort_values(["int", "float"], na_position="last")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["int", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
# reverse sorting order
df_sorted = df.sort_values(["int", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
# and now check if NaT is still considered as "na" for datetime64
# columns:
df = DataFrame(dict(datetime=[Timestamp("2016-01-01"), NaT],
float=float_values), columns=["datetime", "float"])
df_reversed = DataFrame(dict(datetime=[NaT, Timestamp("2016-01-01")],
float=float_values[::-1]),
columns=["datetime", "float"],
index=[1, 0])
df_sorted = df.sort_values(["datetime", "float"], na_position="first")
assert_frame_equal(df_sorted, df_reversed)
df_sorted = df.sort_values(["datetime", "float"], na_position="last")
assert_frame_equal(df_sorted, df)
# Ascending should not affect the results.
df_sorted = df.sort_values(["datetime", "float"], ascending=False)
assert_frame_equal(df_sorted, df)
def test_sort_nat(self):
# GH 16836
d1 = [Timestamp(x) for x in ['2016-01-01', '2015-01-01',
np.nan, '2016-01-01']]
d2 = [Timestamp(x) for x in ['2017-01-01', '2014-01-01',
'2016-01-01', '2015-01-01']]
df = pd.DataFrame({'a': d1, 'b': d2}, index=[0, 1, 2, 3])
d3 = [Timestamp(x) for x in ['2015-01-01', '2016-01-01',
'2016-01-01', np.nan]]
d4 = [Timestamp(x) for x in ['2014-01-01', '2015-01-01',
'2017-01-01', '2016-01-01']]
expected = pd.DataFrame({'a': d3, 'b': d4}, index=[1, 3, 0, 2])
sorted_df = df.sort_values(by=['a', 'b'], )
tm.assert_frame_equal(sorted_df, expected)
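# Quick illustration of the GH 14922 note above: in an int64 column, NaT is just the smallest
# int64 value rather than a missing value, so it sorts like any other integer.
assert int(NaT) == np.iinfo('int64').min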
class TestDataFrameSortIndexKinds(TestData):
def test_sort_index_multicolumn(self):
A = np.arange(5).repeat(20)
B = np.tile(np.arange(5), 20)
random.shuffle(A)
random.shuffle(B)
frame = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'])
result = frame.sort_values(by=['A', 'B'])
indexer = np.lexsort((frame['B'], frame['A']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['A', 'B'], ascending=False)
result = frame.sort_values(by=['A', 'B'], ascending=False)
indexer = np.lexsort((frame['B'].rank(ascending=False),
frame['A'].rank(ascending=False)))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
frame.sort_index(by=['B', 'A'])
result = frame.sort_values(by=['B', 'A'])
indexer = np.lexsort((frame['A'], frame['B']))
expected = frame.take(indexer)
assert_frame_equal(result, expected)
def test_sort_index_inplace(self):
frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],
columns=['A', 'B', 'C', 'D'])
# axis=0
unordered = frame.loc[[3, 2, 4, 1]]
a_id = id(unordered['A'])
df = unordered.copy()
df.sort_index(inplace=True)
expected = frame
assert_frame_equal(df, expected)
assert a_id != id(df['A'])
df = unordered.copy()
df.sort_index(ascending=False, inplace=True)
expected = frame[::-1]
assert_frame_equal(df, expected)
# axis=1
unordered = frame.loc[:, ['D', 'B', 'C', 'A']]
df = unordered.copy()
df.sort_index(axis=1, inplace=True)
expected = frame
assert_frame_equal(df, expected)
df = unordered.copy()
df.sort_index(axis=1, ascending=False, inplace=True)
expected = frame.iloc[:, ::-1]
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
indexer = np.random.permutation(100)
A = A.take(indexer)
B = B.take(indexer)
df = DataFrame({'A': A, 'B': B,
'C': np.random.randn(100)})
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['A', 'B'], ascending=[1, 0])
result = df.sort_values(by=['A', 'B'], ascending=[1, 0])
ex_indexer = np.lexsort((df.B.max() - df.B, df.A))
expected = df.take(ex_indexer)
assert_frame_equal(result, expected)
# test with multiindex, too
idf = df.set_index(['A', 'B'])
result = idf.sort_index(ascending=[1, 0])
expected = idf.take(ex_indexer)
assert_frame_equal(result, expected)
# also, Series!
result = idf['C'].sort_index(ascending=[1, 0])
assert_series_equal(result, expected['C'])
def test_sort_index_duplicates(self):
# with 9816, these are all translated to .sort_values
df = DataFrame([lrange(5, 9), lrange(4)],
columns=['a', 'a', 'b', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by='a')
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
df.sort_values(by=['a'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
# multi-column 'by' is separate codepath
df.sort_index(by=['a', 'b'])
with tm.assert_raises_regex(ValueError, 'not unique'):
# multi-column 'by' is separate codepath
df.sort_values(by=['a', 'b'])
# with multi-index
# GH4370
df = DataFrame(np.random.randn(4, 2),
columns=MultiIndex.from_tuples([('a', 0), ('a', 1)]))
with tm.assert_raises_regex(ValueError, 'level'):
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by='a')
with tm.assert_raises_regex(ValueError, 'level'):
df.sort_values(by='a')
# convert tuples to a list of tuples
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=[('a', 1)])
expected = df.sort_values(by=[('a', 1)])
# use .sort_values #9816
with tm.assert_produces_warning(FutureWarning):
df.sort_index(by=('a', 1))
result = df.sort_values(by=('a', 1))
assert_frame_equal(result, expected)
def test_sort_index_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
df = DataFrame([[1, 2], [3, 4]], mi)
res = df.sort_index(level='A', sort_remaining=False)
assert_frame_equal(df, res)
res = df.sort_index(level=['A', 'B'], sort_remaining=False)
assert_frame_equal(df, res)
def test_sort_index_categorical_index(self):
df = (DataFrame({'A': np.arange(6, dtype='int64'),
'B': Series(list('aabbca'))
.astype(CategoricalDtype(list('cab')))})
.set_index('B'))
result = df.sort_index()
expected = df.iloc[[4, 0, 1, 5, 2, 3]]
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
'''
Created on 17.04.2018
@author: malte
'''
import numpy as np
import pandas as pd
class SAGH:
def __init__(self, normalize=False, item_key='track_id', artist_key='artist_id', session_key='playlist_id', return_num_preds=500):
self.item_key = item_key
self.artist_key = artist_key
self.session_key = session_key
self.normalize = normalize
self.return_num_preds = return_num_preds
def train(self, data, test=None):
train = data['actions']
agh = | pd.DataFrame() | pandas.DataFrame |
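# The train() body is truncated above; a purely hypothetical continuation (not the original
# implementation) could count artist/track occurrences to build the "greatest hits" table, e.g.:
_toy_actions = pd.DataFrame({'artist_id': [1, 1, 2], 'track_id': [10, 10, 20], 'playlist_id': [5, 6, 5]})
_toy_agh = _toy_actions.groupby(['artist_id', 'track_id']).size().reset_index(name='count')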
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020-05-13 16:48
# @Author : NingAnMe <<EMAIL>>
import os
import sys
import argparse
from numpy import loadtxt
from numpy import cos as np_cos
from numpy import sin as np_sin
from numpy import radians, arcsin, rad2deg, cumsum
from numpy import ones
from numpy import int16, int8
from numpy import object as np_object
from numpy import array
from numpy import logical_and, logical_or
from numpy import full_like
from pandas import DataFrame, read_excel, concat
from datetime import datetime
from dateutil.relativedelta import relativedelta
import warnings
warnings.filterwarnings('ignore')
root_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
Eq_file = os.path.join(root_dir, 'aid', 'Eq.csv')
Eq_lut = loadtxt(Eq_file, delimiter=',')
def cos(x):
return np_cos(radians(x))
def sin(x):
return np_sin(radians(x))
def get_Lc(Lg):
return 4 * (Lg - 120) / 60.
def get_Ct(hour, minute):
return hour + minute / 60.
def get_Eq(year, month, day):
# Gregorian leap years: divisible by 4 and not by 100, unless divisible by 400
index = logical_and(year % 4 == 0, year % 100 != 0)
index = logical_or(year % 400 == 0, index)
day[~index] -= 1
return Eq_lut[day, month]
def get_Tt(Ct, Lc, Eq):
eq_60 = Eq / 60
ct_lc = Ct + Lc
return ct_lc + eq_60
def get_Omiga(Tt):
return (Tt - 12) * 15
def get_Delta(n):
return 23.45 * sin(360 / 365 * (284 + n))
def get_EDNI(doy):
E0 = 1366.1
EDNI = E0 * (1 + 0.033 * cos(360. / 365 * doy))
return EDNI
def get_sha_cos(Phi, Delta, Omiga):
shz_cos_ = cos(Phi) * cos(Delta) * cos(Omiga) + sin(Phi) * sin(Delta)
return shz_cos_
def get_EHI(edni, sha_cos):
EHI_ = edni * sha_cos
EHI_ = array(EHI_)
EHI_[EHI_ < 0] = 0
return EHI_
def get_SHA(Phi, Delta, Omiga):
sha_cos_ = get_sha_cos(Phi, Delta, Omiga)
sha_radian = arcsin(sha_cos_)
sha_degree = rad2deg(sha_radian)
return sha_degree
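# Quick sanity check of the helpers above (approximate values; day 172 falls near the June solstice):
_delta_demo = get_Delta(172)  # ~ 23.4 degrees of solar declination
_edni_demo = get_EDNI(172)    # ~ 1322 W/m2 of extraterrestrial DNI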
def get_REHI(ehi, frequency='minute'):
if len(ehi) <= 1:
rehi = array(0)
else:
if frequency == 'minute':
rehi = ehi * 60 / 1e6
elif frequency == 'hour':
rehi = ehi * 3600 / 1e6
else:
raise ValueError('frequency must be minute or hour')
rehi = cumsum(rehi)
rehi[1:] = rehi[0:-1]
rehi[0] = 0.0
return rehi
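# Example: a constant 600 W/m2 over three one-minute steps accumulates, shifted so the first
# entry is zero, to [0.0, 0.036, 0.072] MJ/m2.
_rehi_demo = get_REHI(array([600.0, 600.0, 600.0]), frequency='minute')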
def EDNI_EHI_SHA(longitude, latitude, doy, year, month, day, hour, minute):
Phi = latitude
Lc_ = get_Lc(longitude)
Ct_ = get_Ct(hour, minute)
Eq_ = get_Eq(year, month, day)
Tt_ = get_Tt(Ct_, Lc_, Eq_)
Omiga_ = get_Omiga(Tt_)
Delta_ = get_Delta(doy)
sha_cos_ = get_sha_cos(Phi, Delta_, Omiga_)
EDNI_ = get_EDNI(doy)
EHI_ = get_EHI(EDNI_, sha_cos_)
SHA_ = get_SHA(Phi, Delta_, Omiga_)
if DEBUG:
print(f'Lc_: {Lc_}')
print(f'Ct_: {Ct_}')
print(f'Eq_: {Eq_}')
print(f'Tt_: {Tt_}')
print(f'Omiga_: {Omiga_}')
print(f'Delta_: {Delta_}')
print(f'sha_cos_: {sha_cos_}')
print(f'EDNI_: {EDNI_}')
print(f'EHI_: {EHI_}')
print(f'SHA_: {SHA_}')
return EDNI_, EHI_, SHA_
def get_datetime(datetime_start, datetime_end, frequency='minute'):
if frequency == 'minute':
delta = int((datetime_end - datetime_start).total_seconds() / 60)
elif frequency == 'hour':
datetime_start = datetime_start.strftime("%Y%m%d%H")
datetime_start = datetime.strptime(datetime_start, '%Y%m%d%H')
datetime_end = datetime_end.strftime("%Y%m%d%H")
datetime_end = datetime.strptime(datetime_end, '%Y%m%d%H')
delta = int((datetime_end - datetime_start).total_seconds() / 3600)
else:
raise ValueError('frequency must be minute or hour')
datetimes = ones((delta + 1,), dtype=np_object)
doy = ones((delta + 1,), dtype=int16)
year = ones((delta + 1,), dtype=int16)
month = ones((delta + 1,), dtype=int8)
day = ones((delta + 1,), dtype=int16)
hour = ones((delta + 1,), dtype=int8)
minute = ones((delta + 1,), dtype=int8)
index = 0
while datetime_start <= datetime_end:
datetimes[index] = datetime_start.strftime('%Y%m%d%H%M')
doy[index] = int(datetime_start.strftime('%j'))
year[index] = datetime_start.year
month[index] = datetime_start.month
day[index] = datetime_start.day
hour[index] = datetime_start.hour
minute[index] = datetime_start.minute
index += 1
if frequency == 'minute':
datetime_start += relativedelta(minutes=1)
elif frequency == 'hour':
datetime_start += relativedelta(hours=1)
else:
raise ValueError('frequency must be minute or hour')
return datetimes, doy, year, month, day, hour, minute
def format_result(result):
result['经度'] = result['经度'].map(lambda x: '{:0.2f}'.format(x))
result['纬度'] = result['纬度'].map(lambda x: '{:0.2f}'.format(x))
result['太阳高度角'] = result['太阳高度角'].map(lambda x: '{:0.2f}'.format(x))
result['EDNI(W/m2)'] = result['EDNI(W/m2)'].map(lambda x: '{:0.2f}'.format(x))
result['EHI(W/m2)'] = result['EHI(W/m2)'].map(lambda x: '{:0.2f}'.format(x))
result['累积辐照量(MJ/m2)'] = result['累积辐照量(MJ/m2)'].map(lambda x: '{:0.2f}'.format(x))
result['累积辐照量(kWh/m2)'] = result['累积辐照量(kWh/m2)'].map(lambda x: '{:0.2f}'.format(x))
return result
def get_start(date):
# first moment (1 January, 00:00) of the calendar year containing `date`, mirroring get_end below
d = date.strftime('%Y') + '0101'
return datetime.strptime(d, '%Y%m%d')
def get_end(date):
d = date.strftime('%Y')
return datetime.strptime(d + '12312359', '%Y%m%d%H%M')
def get_start_end(date):
s = get_start(date)
e = get_end(date)
return s, e
def get_datetime_start_end(datetime_start, datetime_end):
starts = list()
ends = list()
delta = datetime_end.year - datetime_start.year
if delta == 0:
starts.append(datetime_start)
ends.append(datetime_end)
elif delta == 1:
end = get_end(datetime_start)
starts.append(datetime_start)
ends.append(end)
start = get_start(datetime_end)
starts.append(start)
ends.append(datetime_end)
else:
end = get_end(datetime_start)
starts.append(datetime_start)
ends.append(end)
for i in range(1, delta):
start, end = get_start_end(datetime_start + relativedelta(years=i))
starts.append(start)
ends.append(end)
start = get_start(datetime_end)
starts.append(start)
ends.append(datetime_end)
return starts, ends
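# Example: with get_start/get_end defined above, a request spanning one year boundary is split
# into per-calendar-year chunks so the cumulative totals restart on 1 January:
_starts_demo, _ends_demo = get_datetime_start_end(datetime(2020, 11, 1), datetime(2021, 2, 1))
# _starts_demo -> [2020-11-01 00:00, 2021-01-01 00:00]
# _ends_demo   -> [2020-12-31 23:59, 2021-02-01 00:00]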
def product_one_point(datetime_start, datetime_end, longitude, latitude, frequency='minute', outfile=None):
if DEBUG:
print('--- product_one_point ---:Start')
print('--- product_one_point <<< datetime_start:{}'.format(datetime_start))
print('--- product_one_point <<< datetime_end:{}'.format(datetime_end))
print('--- product_one_point <<< longitude:{}'.format(longitude))
print('--- product_one_point <<< latitude:{}'.format(latitude))
print('--- product_one_point <<< frequency:{}'.format(frequency))
print('--- product_one_point <<< outfile:{}'.format(outfile))
datetime_start = datetime.strptime(datetime_start, '%Y%m%d%H%M')
datetime_end = datetime.strptime(datetime_end, '%Y%m%d%H%M')
# 2020-06-05: split the request by calendar year so the cumulative totals restart each year
datetime_starts, datetime_ends = get_datetime_start_end(datetime_start, datetime_end)
results_df = None
for datetime_now, datetime_util in zip(datetime_starts, datetime_ends):
datetimes, doy, year, month, day, hour, minute = get_datetime(datetime_now, datetime_util, frequency)
EDNI_, EHI_, SHA_ = EDNI_EHI_SHA(longitude, latitude, doy, year, month, day, hour, minute)
longitude_ = full_like(EDNI_, longitude)
latitude_ = full_like(EDNI_, latitude)
REHI = get_REHI(EHI_, frequency)
results = {
'经度': longitude_,
'纬度': latitude_,
'时间': datetimes,
'太阳高度角': SHA_,
'EDNI(W/m2)': EDNI_,
'EHI(W/m2)': EHI_,
'累积辐照量(MJ/m2)': REHI,
'累积辐照量(kWh/m2)': REHI / 3.6,
}
if results_df is None:
results_df = DataFrame(results)
else:
results_df = concat((results_df, DataFrame(results)))
if results_df is not None:
results = format_result(results_df)
results = results[['经度', '纬度', '时间', '太阳高度角',
'EDNI(W/m2)', 'EHI(W/m2)', '累积辐照量(MJ/m2)', '累积辐照量(kWh/m2)']]
if outfile is not None:
results.to_csv(outfile, index=False)
if DEBUG:
print('--- product_one_point ---:>>>:{}'.format(outfile))
if DEBUG:
print('--- product_one_point ---:End')
return results
def product_multi_point(infile, outfile, frequency='minute'):
if DEBUG:
print('--- product_multi_point ---:Start')
print('--- product_multi_point <<< infile:{}'.format(infile))
print('--- product_multi_point <<< outfile:{}'.format(outfile))
indata = | read_excel(infile) | pandas.read_excel |
"""
Created on June 6, 2016
@author: <NAME> (<EMAIL>)
Updated Nov 21, 2017 by <NAME> (github.com/Spenca)
"""
import csv
import os, sys, io
import re
import pandas as pd
import numpy as np
import requests
import yaml
from string import Template
from collections import OrderedDict
from datetime import date, datetime, timedelta
#===============
# Django imports
#---------------
from django.db.models import Count, Q, F
from django.http import HttpResponse
from sisyphus.models import DlpAnalysisInformation, Project
from tenx.models import TenxPool
from .models import Sample, SublibraryInformation, ChipRegion, ChipRegionMetadata, MetadataField, DoubletInformation
from dlp.models import (DlpLane, DlpSequencing, DlpLibrary)
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
#============================
# Pipeline Status
#----------------------------
def get_sequence_date(analysis, library=None):
try:
# Is it for Library?
if library:
sequencing_set = analysis.dlpsequencing_set.all()
# Does Analysis have lanes, if then retrieve latest lane_requested_date from sequencings related to lanes
elif analysis.lanes.all():
sequencing_set = set(l.sequencing for l in analysis.lanes.all())
# Else, Does Analysis have sequencings, retrieve latest lane_requested_date from sequencings directly attached analysis
elif analysis.sequencings.all():
sequencing_set = analysis.sequencings.all()
# Otherwise, fall back to the sequencings of the analysis's library
else:
sequencing_set = analysis.library.dlpsequencing_set.all()
return max([sequencing.lane_requested_date for sequencing in sequencing_set])
except:
return None
def analysis_info_dict(analysis):
lanes = analysis.lanes.count()
goal = sum(s.number_of_lanes_requested for s in analysis.sequencings.all())
submission_date = get_sequence_date(analysis)
return {
"jira": analysis.analysis_jira_ticket,
"lanes": "{}/{}".format(lanes, goal),
"version": analysis.version.version,
"run_status": analysis.analysis_run.run_status,
"aligner": "bwa-aln" if analysis.aligner is "A" else "bwa-mem",
"submission": str(submission_date) if submission_date else None,
"last_updated": str(analysis.analysis_run.last_updated.date()) if analysis.analysis_run.last_updated else None
}
def fetch_montage():
r = requests.get('https://52.235.35.201/_cat/indices', verify=False, auth=("guest", "sh<PASSWORD>!Montage")).text
return [j.replace("sc", "SC") for j in re.findall('sc-\d{4}', r)]
def analysis_to_row(analysis, basic_dict=None, incomplete=None):
if not basic_dict:
basic_dict = {"name": analysis.library.sample.sample_id, "library": analysis.library.pool_id}
return {**basic_dict, **analysis_info_dict(analysis)}
# | Validate whether a given analysis is IMPORTED or not
# | Input: Analysis
# | Output: Boolean
# {True if imported}
def validate_imported(analysis):
# Retrieve all lanes attached to Analysis and create a set of seqeuncings based on it
related_sequencings = set(l.sequencing for l in analysis.lanes.all())
# Check if count(lanes attached to analysis) is smaller or equal to count(lanes attached to related_sequencings)
return analysis.lanes.count() <= sum(s.dlplane_set.count() for s in related_sequencings)
# | (INCOMPLETE) Fetch Row Information related to incomplete Analyses
# | Input: None
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_incomplete_analyses():
object_list = []
analyses = DlpAnalysisInformation.objects.exclude(
analysis_run__run_status__in=['complete', 'align_complete', 'hmmcopy_complete'])
for a in analyses.all():
object_list.append(analysis_to_row(a, incomplete=True))
return object_list
# | (PROJECTS) Fetch Row Information for a given set of DLP libraries
# | Input: Set of Dlp Libraries
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_libraries(libraries, wetlab=None, no_analysis=None):
object_list = []
for library in libraries:
basic_dict = {"name": library.sample.sample_id, "library": library.pool_id}
# For each libraries retrieve all attached analyses
analyses = library.dlpanalysisinformation_set.all()
if analyses and not no_analysis:
for analysis in analyses:
# In the wetlab view, per-analysis rows are skipped here; recently completed analyses are appended separately in fetch_rows_for_wetlab
if not wetlab:
object_list.append((analysis_to_row(analysis, basic_dict)))
# If Library does not have any analysis, fill in NA information
else:
# if Wetlab display Sequencing lane info instead of Analysis lane info
if wetlab or no_analysis:
sequencings = library.dlpsequencing_set.all()
if sequencings:
goal = sum(l.number_of_lanes_requested for l in sequencings)
lane = sum(l.dlplane_set.count() for l in sequencings)
basic_dict = {**basic_dict, "lanes": "{}/{}".format(lane, goal) if sequencings else None}
object_list.append({**basic_dict, "submission": str(get_sequence_date(library, True))})
return object_list
# | Fetch Row Information for a given set of sequencings
# | Input: Set of Sequencings
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_sequencings(sequencings, wetlab=False):
object_list = []
for sequencing in sequencings:
object_list += fetch_rows_from_libraries([sequencing.library], wetlab=wetlab)
return object_list
# | (NO ANALYSIS) Fetch Row Information related to libraries with no analyses but correct lane numbers
# | Input: None
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_from_no_analysis_libraries():
libraries = DlpLibrary.objects\
.annotate(lane_count=Count('dlpsequencing__dlplane'),lane_goal=Count('dlpsequencing__number_of_lanes_requested'))\
.filter(Q(dlpanalysisinformation=None)&Q(lane_count=F('lane_goal'))).all()
return fetch_rows_from_libraries(libraries, no_analysis=True)
# | (WETLAB) Fetch Row Information from sequencings with certain conditions:
# | 1. (OR) Mismatching lane count
# | 2. (AND) Lane requested within 2 months
# | 3. Additionally, hide completed analyses
# | 4. Recently COMPLETED
# | Input: None
# | Output: List of Objects to populate Status Page Rows
def fetch_rows_for_wetlab():
threshold = datetime.now() - timedelta(days=60)
# Unimported
sequencings = DlpSequencing.objects\
.annotate(lane_count=Count('dlplane'))\
.filter((Q(lane_count=0)|Q(lane_count__lt=F('number_of_lanes_requested')))&Q(lane_requested_date__gte=threshold))
# Recently Finished or Updated
threshold = datetime.now() - timedelta(days=14)
analyses = DlpAnalysisInformation.objects\
.filter(Q(analysis_run__run_status__in=['complete','align_complete','hmmcopy_complete'])&Q(analysis_run__last_updated__gte=threshold))
analyses_list = [{
**{
"name": a.library.sample.sample_id,
"library": a.library.pool_id
},
**analysis_info_dict(a)
} for a in analyses]
return fetch_rows_from_sequencings(sequencings, wetlab=True) + analyses_list
# | List of Status Page Row Objects
# |
# | WETLAB:
# | Populate row from all sequencings with lane !== goal && recently submitted (2 months)
# |
# | NO ANALYSIS:
# | Populate row from all libraries with sum(sequencing's requested_lane_number) == sum(sequencing's lane count),
# | but no Analysis attached.
# |
# | INCOMPLETE:
# | Populate row from all analyses with run_status not set as either one of ['complete','align_complete','hmmcopy_complete']
# |
# | PROJECTS:
# | Populate rows from set of DlpLibraries of selected Project
def fetch_row_objects(type, key=None):
type = type.strip()
if type == "PROJECTS":
return fetch_rows_from_libraries(Project.objects.get(name=key).dlplibrary_set.all())
elif type == "INCOMPLETE":
return fetch_rows_from_incomplete_analyses()
elif type == "NO ANALYSIS":
return fetch_rows_from_no_analysis_libraries()
elif type == "WETLAB":
return fetch_rows_for_wetlab()
else:
return
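# Illustrative usage sketch (not part of the original module; the project name below is an
# assumption). fetch_row_objects expects one of the four type strings and, for "PROJECTS",
# the project name as key; unknown types fall through to None.
def _example_fetch_status_rows(project_name="EXAMPLE"):
    project_rows = fetch_row_objects("PROJECTS", key=project_name) or []
    incomplete_rows = fetch_row_objects("INCOMPLETE") or []
    return project_rows + incomplete_rows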
#==================================================
# Upload, parse and populate Sublibrary Information
#--------------------------------------------------
def read_excel_sheets(filename, sheetnames):
""" Read the excel sheet.
"""
try:
data = pd.read_excel(filename, sheet_name=None)
except IOError:
raise ValueError('unable to find file', filename)
for sheetname in sheetnames:
if sheetname not in data:
raise ValueError('unable to read sheet(s)', sheetname)
yield data[sheetname]
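# Illustrative usage sketch (the filename is an assumption): read_excel_sheets is a
# generator, so the sheets come back in the same order their names are requested.
def _example_read_smartchip_sheets(filename="smartchipapp_results.xlsx"):
    summary, region_meta = read_excel_sheets(filename, ['Summary', 'Region_Meta_Data'])
    return summary, region_meta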
def check_smartchip_row(index, smartchip_row):
row_sum = sum(smartchip_row)
single_matrix = np.identity(3)
doublet_matrix = np.identity(3) * 2
# Row does not have cells
if smartchip_row == [0, 0, 0]:
cell = None
# TODO: Clean up code; use identity matrices
# Row is singlet
elif row_sum == 1:
for row in range(len(smartchip_row)):
if np.array_equal(smartchip_row, single_matrix[row]):
cell = [row, 0]
# Row is doublet and is strictly live/dead/other
elif row_sum == 2 and len(np.where(np.array(smartchip_row) == 0)[0]) == 2:
for row in range(len(smartchip_row)):
if np.array_equal(smartchip_row, doublet_matrix[row]):
cell = [row, 1]
# Row is doublet but mixed
elif row_sum == 2 and len(np.where(np.array(smartchip_row) == 0)[0]) != 2:
cell = [2, 1]
# Greater than doublet row and row is multiple of unit vector
elif row_sum > 2 and row_sum in smartchip_row:
        non_zero_index = np.where(np.array(smartchip_row) != 0)
cell = [non_zero_index[0][0], 2]
else:
cell = [2, 2]
return cell
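# Minimal classification sketch for check_smartchip_row (rows chosen for illustration);
# the returned pair is [column, row] into the live/dead/other x single/doublet/more table.
def _check_smartchip_row_examples():
    assert check_smartchip_row(0, [0, 0, 0]) is None      # empty spot
    assert check_smartchip_row(0, [1, 0, 0]) == [0, 0]    # live singlet
    assert check_smartchip_row(0, [0, 2, 0]) == [1, 1]    # dead doublet
    assert check_smartchip_row(0, [1, 1, 0]) == [2, 1]    # mixed doublet
    assert check_smartchip_row(0, [3, 0, 0]) == [0, 2]    # live, more than doublet
    assert check_smartchip_row(0, [2, 1, 0]) == [2, 2]    # mixed, more than doublet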
def generate_doublet_info(filename):
""" Read SmartChipApp results and record doublet info
"""
col_names = ["live", "dead", "other"]
row_names = ["single", "doublet", "more_than_doublet"]
data = np.zeros((3, 3))
doublet_table = pd.DataFrame(data, columns=col_names, index=row_names, dtype=int)
results = pd.read_excel(filename, sheet_name="Summary")
results = results[results["Condition"] != "~"]
for index, row in results.iterrows():
smartchip_row = [row["Num_Live"], row["Num_Dead"], row["Num_Other"]]
override_row = [row["Rev_Live"], row["Rev_Dead"], row["Rev_Other"]]
if np.array_equal(override_row, [-1, -1, -1]):
cell = check_smartchip_row(index, smartchip_row)
else:
cell = check_smartchip_row(index, override_row)
if cell is not None:
doublet_table[col_names[cell[0]]][row_names[cell[1]]] += 1
return doublet_table
def parse_smartchipapp_results_file(filename):
""" Parse the result file of SmartChipApp.
"""
results, region_metadata = read_excel_sheets(filename, ['Summary', 'Region_Meta_Data'])
# filter out the cells whose Spot_Well value is not NaN
results = results[~results['Spot_Well'].isnull()]
results = results.sort_values(by='Sample')
    # change the column names to match the field names of the model
results.columns = [c.lower() for c in results.columns]
region_metadata.columns = [c.lower() for c in region_metadata.columns]
# Lower case metadata field names and check if column exists in metadata fields
for c in region_metadata.columns:
if c not in MetadataField.objects.all().values_list('field', flat=True) and c != "region":
raise ValueError('invalid metadata column: {col_name}'.format(col_name=c))
region_metadata.columns.name = 'metadata_field'
region_metadata.rename(columns={'region': 'region_code'}, inplace=True)
region_metadata = region_metadata.set_index('region_code').stack().rename('metadata_value').reset_index()
return results, region_metadata
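# Shape sketch (example values are assumptions): after the stack() call above, the region
# metadata comes back in long form, one row per (region, field) pair, e.g.
#   region_code  metadata_field  metadata_value
#   A            sample_id       SA1151
#   A            pick_met        C1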
def create_sublibrary_models(library, sublib_results, region_metadata):
""" Create sublibrary models from SmartChipApp Tables
"""
# Populate the ChipRegion and ChipRegionMetadata from the SmartChipApp results
chip_spot_region_id = {}
chip_spot_sample_id = {}
for code, metadata in region_metadata.groupby('region_code'):
chip_region = ChipRegion(region_code=code)
chip_region.library_id = library.pk
chip_region.save()
sample_id = None
for idx, row in metadata.iterrows():
row['metadata_field'] = row['metadata_field'].lower()
chip_region_metadata = ChipRegionMetadata(
metadata_field=MetadataField.objects.get(field=row['metadata_field']),
metadata_value=row['metadata_value'])
chip_region_metadata.chip_region_id = chip_region.id
chip_region_metadata.save()
if row['metadata_field'] == 'sample_id':
sample_id = row['metadata_value']
if sample_id is None:
raise ValueError('No sample id for region {}'.format(code))
try:
#Need to encode as ascii and ignore special characters, otherwise we get sample IDs like 'SA1151\xa0' instead of 'SA1151'
sample = Sample.objects.get(sample_id=sample_id.encode('ascii', 'ignore'))
except Sample.DoesNotExist:
raise ValueError('Unrecognized sample {}'.format(sample_id))
for idx, row in sublib_results[sublib_results['condition'] == code].iterrows():
chip_spot_region_id[(row['row'], row['column'])] = chip_region.id
chip_spot_sample_id[(row['row'], row['column'])] = sample
# Populate the Sublibrary from the SmartChipApp input and results
for idx, row in sublib_results.iterrows():
row = row.drop('rev_class')
sublib = SublibraryInformation(**row.to_dict())
sublib.library_id = library.pk
try:
sublib.chip_region_id = chip_spot_region_id[(row['row'], row['column'])]
sublib.sample_id = chip_spot_sample_id[(row['row'], row['column'])]
sublib.save()
except KeyError:
raise ValueError('Undefined condition in metadata at row, column: {}, {}'.format(row['row'], row['column']))
library.num_sublibraries = len(sublib_results.index)
library.save()
def create_doublet_info_model(library, doublet_info_matrix):
try:
doublet_info = DoubletInformation.objects.get(library=library)
    except DoubletInformation.DoesNotExist:
doublet_info = DoubletInformation.objects.create(library=library)
doublet_info.save()
doublet_info.live_single = doublet_info_matrix["live"]["single"]
doublet_info.dead_single = doublet_info_matrix["dead"]["single"]
doublet_info.other_single = doublet_info_matrix["other"]["single"]
doublet_info.live_doublet = doublet_info_matrix["live"]["doublet"]
doublet_info.dead_doublet = doublet_info_matrix["dead"]["doublet"]
doublet_info.other_doublet = doublet_info_matrix["other"]["doublet"]
doublet_info.live_gt_doublet = doublet_info_matrix["live"]["more_than_doublet"]
doublet_info.dead_gt_doublet = doublet_info_matrix["dead"]["more_than_doublet"]
doublet_info.other_gt_doublet = doublet_info_matrix["other"]["more_than_doublet"]
doublet_info.save()
#=================
# History manager
#-----------------
class HistoryManager(object):
"""
An api for simple_history app.
"""
@staticmethod
def print_history(object, history_type=None):
print('=' * 100)
print("Object\tID\tDate\tAction\tUser")
print('=' * 100)
if history_type is None:
histories = object.history.all()
else:
histories = object.history.filter(history_type=history_type)
for h in histories:
print("\t".join([
str(h.instance),
str(h.instance.id),
str(h.history_date),
h.get_history_type_display(),
str(h.history_user),
]))
print('-' * 100)
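# Illustrative usage sketch (the model instance and history_type code are assumptions):
# any model registered with django-simple-history exposes `.history`, so for example
#   HistoryManager.print_history(my_library)                    # full history
#   HistoryManager.print_history(my_library, history_type="~")  # updates only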
def generate_tenx_pool_sample_csv(id):
buffer = io.StringIO()
pool = TenxPool.objects.get(id=id)
list_of_dict = []
for library in pool.libraries.all():
index = library.tenxlibraryconstructioninformation.index_used
list_of_dict.append({"lane": "*", "sample": library.name, "index": index.split(",")[0] if index else "None"})
wr = csv.DictWriter(buffer, fieldnames=["lane", "sample", "index"])
wr.writeheader()
wr.writerows(list_of_dict)
buffer.seek(0)
response = HttpResponse(buffer, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename={}_tenxpool_sample.csv'.format(pool.id)
return response
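# Illustrative sketch (view and URL wiring are assumptions): the helper already builds
# the HttpResponse, so a download view can simply return it.
def _example_tenxpool_csv_view(request, pool_id):
    return generate_tenx_pool_sample_csv(pool_id)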
#======================
# Generate sample sheet
#----------------------
def generate_samplesheet(pk, wdir=None):
"""generate samplesheet for the given Sequencing."""
samplesheet = SampleSheet(pk)
sheet_name = samplesheet.sheet_name
if wdir:
ofilename = os.path.join(wdir, sheet_name)
else:
ofilename = os.path.join(settings.MEDIA_ROOT, sheet_name)
samplesheet.write_header(ofilename)
samplesheet.write_data(ofilename)
return sheet_name, os.path.abspath(ofilename)
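# Illustrative usage sketch (the lane primary key and working directory are assumptions):
def _example_write_samplesheet(lane_pk, run_dir="/tmp"):
    sheet_name, abs_path = generate_samplesheet(lane_pk, wdir=run_dir)
    return sheet_name, abs_path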
class SampleSheet(object):
"""
Sequencing SampleSheet.
"""
def __init__(self, pk):
self._lane = get_object_or_404(DlpLane, pk=pk)
self._si = self._lane.sequencing.sequencing_instrument
self._header = os.path.join(settings.BASE_DIR, "templates/template_samplesheet_header.html")
self._colnames = [
'Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2',
'Sample_Project', 'Description'
]
self._rev_comp_i7 = False
self._rev_comp_i5 = False
# All the sequencing machines listed in the models need i7 to be reverse complemented
if self._si != "O":
self._rev_comp_i7 = True
        # Only the NextSeq and HiSeqX require the i5 to be reverse complemented
        if self._si == "N550" or self._si == "HX":
self._rev_comp_i5 = True
rev_comp_override = self._lane.sequencing.rev_comp_override
if rev_comp_override is not None:
self._rev_comp_i7 = ('rev(i7)' in rev_comp_override)
self._rev_comp_i5 = ('rev(i5)' in rev_comp_override)
@property
def sequencing(self):
        return self._lane.sequencing
@property
def sheet_name(self):
fc_id = self._lane.flow_cell_id
sheet_name = 'SampleSheet_%s.csv' % fc_id
return sheet_name
def write_header(self, ofilename):
"""write the header section of the sequencing SampleSheet."""
with open(self._header, 'r') as tempstr:
s = Template(tempstr.read())
d = {
'sequencing_instrument': self._lane.sequencing.get_sequencing_instrument_display(),
'submission_date': self._lane.sequencing.submission_date,
'pool_id': self._lane.sequencing.library.pool_id,
'read1_length': self._lane.sequencing.read1_length,
'read2_length': self._lane.sequencing.read2_length,
}
# Sequencing may have no SequencingDetail
try:
d['flow_cell_id'] = self._lane.flow_cell_id
except:
d['flow_cell_id'] = None
s = s.safe_substitute(**d)
ofile = open(ofilename, 'w')
ofile.write(s)
ofile.close()
def write_data(self, ofilename):
"""write the data section of the sequencing SampleSheet."""
data_table = self._mk_data_table()
# reorder the columns
if (len(data_table.columns) != 0):
data_table = data_table[self._colnames]
data_table.to_csv(ofilename, mode='a', index=False)
else:
ofile = open(ofilename, 'w')
ofile.write("ERROR")
ofile.write("\nNo sublibrary data, cannot generate samplesheet\n")
ofile.close()
def _mk_data_table(self):
"""make data table for data section of the samplesheet template."""
def _map_to_template(s):
d = s.to_dict()
# This is the relation between columns in the template samplesheet
# and the actual columns in df from LIMS.
# for leading 0s in samplesheet
row = str(d['row']) if d['row'] > 9 else '0' + str(d['row'])
col = str(d['column']) if d['column'] > 9 else '0' + str(d['column'])
index = d['primer_i7']
if self._rev_comp_i7:
index = _rc(index)
index2 = d['primer_i5']
if self._rev_comp_i5:
index2 = _rc(index2)
res = {
'Sample_ID':
'-'.join([
str(self._lane.sequencing.library.sample),
str(self._lane.sequencing.library.pool_id), 'R' + row, 'C' + col
]),
'Sample_Name':
'',
'Sample_Plate':
'R' + str(d['row']) + '_C' + str(d['column']),
'Sample_Well':
'R' + str(d['row']) + '_C' + str(d['img_col']),
'I7_Index_ID':
d['index_i7'],
'index':
index,
'I5_Index_ID':
d['index_i5'],
'index2':
index2,
'Description':
'CC=' + d['pick_met'] + ';' + 'EC=' + d['condition'],
}
return res
sample_project = '' #','.join(sequencing.library.projects.names())
newl = []
oldl = list(self._lane.sequencing.library.sublibraryinformation_set.values())
df = pd.DataFrame(oldl)
for d in df.apply(_map_to_template, axis=1):
d['Sample_Project'] = sample_project
newl.append(d)
        return pd.DataFrame(newl)
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring on datetimelike-looking data when object dtype is
# specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
# the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
# in theory this should be all nulls, but since
        # we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
[Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
expected = Series([NaT])
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
msg = r"Series\.name must be a hashable type"
for n in [["name_list"], np.ones(2), {1: 2}]:
for data in [["name_list"], np.ones(2), {1: 2}]:
with pytest.raises(TypeError, match=msg):
Series(data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range("1/1/2000", periods=10)))
assert series.dtype == "M8[ns]"
def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
# note that creating a numpy datetime64 is in LOCAL time!!!!
# seems to work for M8[D], but not for M8[s]
# TODO: is the above comment still accurate/needed?
arr = np.array(
["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"
)
ser = Series(arr)
expected = Series(date_range("20130101", periods=3, freq="D"))
tm.assert_series_equal(ser, expected)
arr = np.array(
["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"],
dtype="datetime64[s]",
)
ser = Series(arr)
expected = Series(date_range("20130101 00:00:01", periods=3, freq="s"))
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cant_cast_datetimelike(self, index):
# floats are not ok
# strip Index to convert PeriodIndex -> Period
# We don't care whether the error message says
# PeriodIndex or PeriodArray
msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to "
with pytest.raises(TypeError, match=msg):
Series(index, dtype=float)
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(index, dtype=np.int64)
expected = Series(index.astype(np.int64))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cast_object(self, index):
s = Series(index, dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(pd.Index(index, dtype=object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(index.astype(object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_constructor_generic_timestamp_no_frequency(self, dtype, request):
# see gh-15524, gh-15987
msg = "dtype has no unit. Please pass in"
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
with pytest.raises(ValueError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize(
"dtype,msg",
[
("m8[ps]", "cannot convert timedeltalike"),
("M8[ps]", "cannot convert datetimelike"),
],
)
def test_constructor_generic_timestamp_bad_frequency(self, dtype, msg):
# see gh-15524, gh-15987
with pytest.raises(TypeError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize("dtype", [None, "uint8", "category"])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64")
result = Series(range(5), dtype=dtype)
tm.assert_series_equal(result, expected)
def test_constructor_tz_mixed_data(self):
# GH 13051
dt_list = [
Timestamp("2016-05-01 02:03:37"),
Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"),
]
result = Series(dt_list)
expected = Series(dt_list, dtype=object)
tm.assert_series_equal(result, expected)
def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture):
# GH#25843
tz = tz_aware_fixture
result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]")
expected = Series([Timestamp("2019")])
tm.assert_series_equal(result, expected)
def test_constructor_datetime64(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
series = Series(dates)
assert np.issubdtype(series.dtype, np.dtype("M8[ns]"))
def test_constructor_datetimelike_scalar_to_string_dtype(self):
# https://github.com/pandas-dev/pandas/pull/33846
result = Series("M", index=[1, 2, 3], dtype="string")
expected = pd.Series(["M", "M", "M"], index=[1, 2, 3], dtype="string")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values",
[
[np.datetime64("2012-01-01"), np.datetime64("2013-01-01")],
["2012-01-01", "2013-01-01"],
],
)
def test_constructor_sparse_datetime64(self, values):
# https://github.com/pandas-dev/pandas/issues/35762
dtype = pd.SparseDtype("datetime64[ns]")
result = | pd.Series(values, dtype=dtype) | pandas.Series |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, T<NAME>, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import numpy as np
import pandas as pd
import datetime as dt
from packaging import version
from pandapower import compare_arrays
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
__author__ = 'smeinecke'
def ensure_iterability(var, len_=None):
""" This function ensures iterability of a variable (and optional length). """
if hasattr(var, "__iter__") and not isinstance(var, str):
if isinstance(len_, int) and len(var) != len_:
raise ValueError("Length of variable differs from %i." % len_)
else:
len_ = len_ or 1
var = [var]*len_
return var
def find_idx_by_name(df, column, name):
idx = df.index[df[column] == name]
if len(idx) == 0:
raise UserWarning("In column '%s', there is no element named %s" % (column, name))
if len(idx) > 1:
raise UserWarning("In column '%s', multiple elements are named %s" % (column, name))
return idx[0]
def idx_in_2nd_array(arr1, arr2, match=True):
""" This function returns an array of indices of arr1 matching arr2.
arr1 may include duplicates. If an item of arr1 misses in arr2, 'match' decides whether
the idx of the nearest value is returned (False) or an error is raised (True).
"""
if match:
missings = list(set(arr1) - set(arr2))
if len(missings):
raise ValueError("These values misses in arr2: " + str(missings))
arr1_, uni_inverse = np.unique(arr1, return_inverse=True)
sort_lookup = np.argsort(arr2)
arr2_ = np.sort(arr2)
idx = np.searchsorted(arr2_, arr1_)
res = sort_lookup[idx][uni_inverse]
return res
def column_indices(df, query_cols):
""" returns an numpy array with the indices of the columns requested by 'query_cols'.
Works propperly for string column names. """
cols = df.columns.values
sidx = np.argsort(cols)
return sidx[np.searchsorted(cols, query_cols, sorter=sidx)]
def merge_dataframes(dfs, keep="first", sort_index=True, sort_column=True, column_to_sort=None,
index_time_str=None, **kwargs):
"""
This is a wrapper function of pandas.concat(dfs, axis=0) to merge DataFrames.
INPUT:
**dfs** (DataFrames) - a sequence or mapping of DataFrames
OPTIONAL:
**keep** (str, "first") - Flag to decide which data are kept in case of duplicated
indices - first, last or all duplicated data.
**sort_index** (bool, True) - If True, the indices of the returning DataFrame will be
sorted. If False, the indices and columns will be in order of the original DataFrames.
**sort_column** (bool, True) - If True, the indices of the returning DataFrame will be
sorted. If False, the indices and columns will be in order of the original DataFrames.
**column_to_sort** (-, None) - If given, 'column_to_sort' must be a column name occuring in
both DataFrames. The returning DataFrame will be sorted by this column. The input
indices get lost.
**index_time_str** (str, None) - If given, the indices or the 'column_to_sort' if given will
be sorted in datetime order.
****kwargs** - Keyword arguments for pandas.concat() except axis, such as sort, join,
join_axes, ignore_index, keys. 'sort' can overwrite 'sort_index' and 'sort_column'.
"""
if "axis" in kwargs:
if kwargs["axis"] != 0:
logger.warning("'axis' is always assumed as zero.")
kwargs.pop("axis")
if "sort" in kwargs:
if not kwargs["sort"] == sort_index == sort_column:
sort_index = kwargs["sort"]
sort_column = kwargs["sort"]
if not sort_index or not sort_column:
logger.warning("'sort' overwrites 'sort_index' and 'sort_column'.")
kwargs.pop("sort")
# --- set index_column as index
if column_to_sort is not None:
if any([column_to_sort not in df.columns for df in dfs]):
raise KeyError("column_to_sort '%s' must be a column of " % column_to_sort +
"both dataframes, df1 and df2")
if not sort_index:
logger.warning("Since 'column_to_sort' is given, the returning DataFrame will be" +
"sorted by this column as well as the columns, although 'sort' " +
"was given as False.")
sort_index = True
dfs = [df.set_index(column_to_sort) for df in dfs]
# --- concat
df = pd.concat(dfs, axis=0, **kwargs)
# --- unsorted index and columns
output_index = df.index.drop_duplicates()
# --- drop rows with duplicated indices
if keep == "first":
df = df.groupby(df.index).first()
elif keep == "last":
df = df.groupby(df.index).last()
elif keep != "all":
raise ValueError("This value %s is unknown to 'keep'" % keep)
# --- sorted index and reindex columns
if sort_index:
if index_time_str:
dates = [dt.datetime.strptime(ts, index_time_str) for ts in df.index]
dates.sort()
output_index = [dt.datetime.strftime(ts, index_time_str) for ts in dates]
if keep == "all":
logger.warning("If 'index_time_str' is not None, keep cannot be 'all' but are " +
"assumed as 'first'.")
else:
output_index = sorted(df.index)
# --- reindex as required
if keep != "all":
if version.parse(pd.__version__) >= version.parse("0.21.0"):
df = df.reindex(output_index)
else:
df = df.reindex_axis(output_index)
if sort_column:
if version.parse(pd.__version__) >= version.parse("0.21.0"):
df = df.reindex(columns=sorted(df.columns))
else:
df = df.reindex_axis(sorted(df.columns), axis=1)
# --- get back column_to_sort as column from index
if column_to_sort is not None:
df.reset_index(inplace=True)
return df
def get_unique_duplicated_dict(df, subset=None, only_dupl_entries=False):
""" Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
of the dict are the indices which are duplicated to each key index.
This is a wrapper function of _get_unique_duplicated_dict() to consider only_dupl_entries.
"""
is_dupl = df.duplicated(subset=subset, keep=False)
uniq_dupl_dict = _get_unique_duplicated_dict(df[is_dupl], subset)
if not only_dupl_entries:
others = df.index[~is_dupl]
uniq_empties = {o: [] for o in others}
# python 3.5+
# uniq_dupl_dict = {**uniq_dupl_dict, **uniq_empties}
# python 3.4
for k, v in uniq_empties.items():
uniq_dupl_dict[k] = v
return uniq_dupl_dict
def _get_unique_duplicated_dict(df, subset=None):
""" Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
of the dict are the indices which are duplicated to each key index. """
subset = subset or df.columns
dupl = df.index[df.duplicated(subset=subset)]
uniq = df.index[~df.duplicated(subset=subset)]
uniq_dupl_dict = {}
# nan_str only needed since compare_arrays() using old numpy versions connected to python 3.4
# don't detect reliably nans as equal
nan_str = "nan"
while nan_str in df.values:
nan_str += "n"
for uni in uniq:
do_dupl_fit = compare_arrays(
np.repeat(df.loc[uni, subset].fillna(nan_str).values.reshape(1, -1), len(dupl), axis=0),
df.loc[dupl, subset].fillna(nan_str).values).all(axis=1)
uniq_dupl_dict[uni] = list(dupl[do_dupl_fit])
return uniq_dupl_dict
def reindex_dict_dataframes(dataframes_dict):
""" Set new continuous index starting at zero for every DataFrame in the dict. """
for key in dataframes_dict.keys():
if isinstance(dataframes_dict[key], pd.DataFrame) and key != "StudyCases":
dataframes_dict[key].index = list(range(dataframes_dict[key].shape[0]))
def ensure_full_column_data_existence(dict_, tablename, column):
"""
Ensures that the column of a dict's DataFrame is fully filled with information. If there are
missing data, it will be filled up by name tablename+index
"""
missing_data = dict_[tablename].index[dict_[tablename][column].isnull()]
# fill missing data by tablename+index, e.g. "Bus 2"
dict_[tablename][column].loc[missing_data] = [tablename + ' %s' % n for n in (
missing_data.values + 1)]
return dict_[tablename]
def avoid_duplicates_in_column(dict_, tablename, column):
""" Avoids duplicates in given column (as type string) of a dict's DataFrame """
query = dict_[tablename][column].duplicated(keep=False)
for double in dict_[tablename][column].loc[query].unique():
idx = dict_[tablename][column].index[dict_[tablename][column] == double]
dict_[tablename][column].loc[idx] = [double + " (%i)" % i for i in range(len(idx))]
if sum(dict_[tablename][column].duplicated()):
raise ValueError("The renaming by 'double + int' was not appropriate to remove all " +
"duplicates.")
def append_str_by_underline_count(str_series, append_only_duplicates=False, counting_start=1,
reserved_strings=None):
"""
Returns a Series of appended strings and a set of all strings which were appended or are set as
reserved by input.
INPUT:
**str_series** (Series with string values) - strings to be appended by "_" + a number
OPTIONAL:
**append_only_duplicates** (bool, False) - If True, all strings will be appended. If False,
only duplicated strings will be appended.
**counting_start** (int, 1) - Integer to start appending with
**reserved_strings** (iterable, None) - strings which are not allowed in str_series and must
be appended.
OUTPUT:
**appended_strings** (Series with string values) - appended strings
**reserved_strings** (set) - all reserved_strings from input and all strings which were
appended
"""
# --- initalizations
# ensure only unique values in reserved_strings:
reserved_strings = pd.Series(sorted(set(reserved_strings))) if reserved_strings is not None \
else pd.Series()
count = counting_start
# --- do first append
# concatenate reserved_strings and str_series (which should be appended by "_%i")
# must be in this order (first reserved_strings) to append only the str_series (keep='first')
if not append_only_duplicates:
series = str_series + "_%i" % count
series = | pd.concat([reserved_strings, series], ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# ### Import necessary libraries
# In[1]:
# Data representation and computation
import pandas as pd
import numpy as np
pd.options.display.float_format = '{:20,.4f}'.format
# plotting
import matplotlib.pyplot as plt
import seaborn as sns
# Data splitting and pipeline for training models
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.utils import shuffle
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score
# Used ML models
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import GradientBoostingClassifier
import lightgbm as lgb
from xgboost import XGBClassifier
# Miscellaneous
import warnings
from prettytable import PrettyTable
# Declaration
warnings.filterwarnings('ignore')
get_ipython().run_line_magic('precision', '2')
get_ipython().run_line_magic('matplotlib', 'inline')
sns.set(font_scale=1)
# ### Load Data
# In[2]:
import json
with open(r"transactions.txt","r+") as f:
llst_transaction_records = f.readlines()
llst_transaction_records = [json.loads(item) for item in llst_transaction_records]
ldf_training_dataset = pd.DataFrame(llst_transaction_records)
ldf_training_dataset['transactionDateTime'] = pd.to_datetime(ldf_training_dataset['transactionDateTime'])
# In[3]:
ldf_training_dataset.head()
# ## Question 1: Load
# ##### Statistical summary
# In[4]:
print("Number of records: "+str(len(ldf_training_dataset)))
# In[5]:
ldf_training_dataset.describe()
# In[6]:
ldf_training_dataset.info()
# ##### Checking for null or NA values
# In[8]:
ldf_training_dataset.isnull().sum()
# ##### Checking for values stored as blank ('')
# In[9]:
ldf_training_dataset.eq('').sum()
# ## Question 2: Plot
# ##### Plotting bar-chart of "isFraud`"
# In[75]:
ldf_temp = ldf_training_dataset.isFraud.value_counts().to_frame("_count")
ldf_temp.plot(kind="bar")
# ##### Plotting histogram of "transactionAmount"
# In[76]:
ldf_training_dataset['transactionAmount'].plot.hist()
# ## Question 3: Data Wrangling - Duplicate Transactions
# #### To identify Multi-swipe transactions, please see the explanation in Word documents)
# In[77]:
ldf_training_dataset['IsDuplicate'] = (ldf_training_dataset.sort_values(['transactionDateTime'])
.groupby(['accountNumber', 'transactionAmount', 'merchantName'], sort=False)['transactionDateTime']
.diff()
.dt.total_seconds()
.lt(300))
# In[78]:
ldf_training_dataset.IsDuplicate.value_counts().to_frame("Count")
# #### To identify Reversed transactions, using the Transaction Type column
# In[79]:
ldf_training_dataset.transactionType.value_counts().to_frame("Count")
# ## Estimating the total number and dollar amount of Normal & Duplicate Transactions.
# In[80]:
Model_Accuracy = PrettyTable()
Model_Accuracy.field_names = ["","No. of Transactions", "Doller Amount"]
Model_Accuracy.align[""] = "r"
ldf_training_dataset_normal = ldf_training_dataset[(ldf_training_dataset['transactionType']!="REVERSAL") &
(ldf_training_dataset['IsDuplicate'] == False)]
Model_Accuracy.add_row(["Normal (Excluding Duplicates)",len(ldf_training_dataset_normal),
round(sum(ldf_training_dataset_normal['transactionAmount']))])
print("\nA - Normal (Excluding Duplicates)")
print(Model_Accuracy)
# In[81]:
Model_Accuracy = PrettyTable()
Model_Accuracy.field_names = ["","No. of Transactions", "Doller Amount"]
Model_Accuracy.align[""] = "r"
ldf_training_dataset_rev = ldf_training_dataset[ldf_training_dataset["transactionType"] == "REVERSAL"]
ldf_training_dataset_multi_swipe = ldf_training_dataset[(ldf_training_dataset['transactionType']!="REVERSAL") &
(ldf_training_dataset['IsDuplicate'] == True)]
Model_Accuracy.add_row(["Reversal",len(ldf_training_dataset_rev),
round(sum(ldf_training_dataset_rev['transactionAmount']))])
Model_Accuracy.add_row(["Multi-Swipe",len(ldf_training_dataset_multi_swipe),
round(sum(ldf_training_dataset_multi_swipe['transactionAmount']))])
Model_Accuracy.add_row(["","----","----"])
Model_Accuracy.add_row(["Reversal + Multi-Swipe",int(len(ldf_training_dataset_rev)+
len(ldf_training_dataset_multi_swipe)),
round(sum(ldf_training_dataset_rev['transactionAmount']))+
round(sum(ldf_training_dataset_multi_swipe['transactionAmount']))])
print("\nB - Duplicates(Reversal + Multi-Swipe)")
print(Model_Accuracy)
# In[82]:
# =====================================================================================================
Model_Accuracy = PrettyTable()
Model_Accuracy.field_names = ["","No. of Transactions", "Doller Amount"]
Model_Accuracy.align[""] = "r"
Model_Accuracy.add_row(["A + B",len(ldf_training_dataset_normal)+
(len(ldf_training_dataset_rev)+len(ldf_training_dataset_multi_swipe)),
(round(sum(ldf_training_dataset_rev['transactionAmount']))+
round(sum(ldf_training_dataset_multi_swipe['transactionAmount'])))+
round(sum(ldf_training_dataset_normal['transactionAmount']))])
print("\nC = A + B (Normal & Duplicate both)")
print(Model_Accuracy)
# ##Question 4: Model
# ### Sampling to make the dataset balanced
# In[83]:
ldf_training_dataset.isFraud.value_counts().to_frame("Count")
# Since the number of isFraud=True cases is very low as compared to the isFraud=False cases,
# we will take out a balanced sample by keeping all the records of isFraud=True cases and taking only 18,000 records of isFraud=False cases.
# In[84]:
ldf_training_dataset_2 = ldf_training_dataset.groupby('isFraud', group_keys=False).apply(lambda x:x.sample(min(len(x), 18000)))
ldf_training_dataset_2 = ldf_training_dataset_2.reset_index(drop=True)
print("Number of records after under-sampling: "+str(len(ldf_training_dataset_2.isFraud)))
# In[85]:
ldf_training_dataset_2.isFraud.value_counts().to_frame("Count")
# ##### Removing the duplicate transactions (Reversal and Multi-swipe)
# In[86]:
ldf_training_dataset_3 = ldf_training_dataset_2[(ldf_training_dataset_2['transactionType']!="REVERSAL") &
(ldf_training_dataset_2['IsDuplicate'] == False)]
# ## Predictive model to determine whether a given transaction will be fraudulent or not
# #### Select input variables
# In[87]:
input_col = ['creditLimit', 'transactionAmount', 'merchantCountryCode',
'merchantCategoryCode','cardPresent', 'posEntryMode', 'posConditionCode',
'acqCountry', 'currentBalance', 'isFraud']
ldf_training_dataset_clf = ldf_training_dataset_3[input_col]
# #### Convert categorical columns to dummy variables
# In[88]:
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["creditLimit"], prefix='FRAUD_CLASSIFIER_creditLimit')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("creditLimit", axis=1, inplace=True)
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["merchantCountryCode"], prefix='FRAUD_CLASSIFIER_merchantCountryCode')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("merchantCountryCode", axis=1, inplace=True)
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["merchantCategoryCode"], prefix='FRAUD_CLASSIFIER_merchantCategoryCode')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("merchantCategoryCode", axis=1, inplace=True)
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["cardPresent"], prefix='FRAUD_CLASSIFIER_cardPresent')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("cardPresent", axis=1, inplace=True)
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["posEntryMode"], prefix='FRAUD_CLASSIFIER_posEntryMode')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("posEntryMode", axis=1, inplace=True)
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["posConditionCode"], prefix='FRAUD_CLASSIFIER_posConditionCode')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("posConditionCode", axis=1, inplace=True)
feature_dummy = pd.get_dummies(ldf_training_dataset_clf["acqCountry"], prefix='FRAUD_CLASSIFIER_acqCountry')
ldf_training_dataset_clf = pd.concat([ldf_training_dataset_clf, feature_dummy], axis = 1)
ldf_training_dataset_clf.drop("acqCountry", axis=1, inplace=True)
# #### Train Test & Validation Split
# In[97]:
y = ldf_training_dataset_clf.isFraud
X = ldf_training_dataset_clf.drop("isFraud", axis=1)
my_tags = y
# In[98]:
# splitting X and y into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=123, shuffle=True)
# In[99]:
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# ### Algorithm 1: Support Vector Machines
# ##### Define a model
# In[89]:
clf_svm = SGDClassifier(
loss='hinge',
penalty='l2',
alpha=1e-3,
random_state=42,
max_iter=5,
tol=None)
# ##### Fit model on training data
# In[102]:
svm_model = clf_svm.fit(X_train, y_train)
# ##### Predicting on test data
# In[103]:
svm_y_pred = svm_model.predict(X_test)
# ##### Model evaluation metrics: Accuracy, Precision, Recall, F1 Score
# In[95]:
confusion_matrix(y_test,svm_y_pred)
# In[109]:
print('Accuracy %s' % accuracy_score(svm_y_pred, y_test))
print('Precision:', precision_score(svm_y_pred, y_test, average='weighted'))
print('Recall:', recall_score(svm_y_pred, y_test,average='weighted'))
print('F1 score:', f1_score(svm_y_pred, y_test,average='weighted'))
# ### Algorithm 2: Light GBM (Light Gradient Boosting Machine Classifier)
# ##### Define a model
# In[112]:
clf_lgbm = lgb.LGBMClassifier()
# ##### Fit model on training data
# In[ ]:
lgbm_model = clf_lgbm.fit(X_train, y_train)
# ##### Predicting on test data
# In[ ]:
lgbm_y_pred = lgbm_model.predict(X_test)
# ##### Model evaluation metrics: Accuracy, Precision, Recall, F1 Score
# In[113]:
confusion_matrix(y_test,lgbm_y_pred)
# In[114]:
print('Accuracy %s' % accuracy_score(lgbm_y_pred, y_test))
print('Precision:', precision_score(lgbm_y_pred, y_test, average='weighted'))
print('Recall:', recall_score(lgbm_y_pred, y_test,average='weighted'))
print('F1 score:', f1_score(lgbm_y_pred, y_test,average='weighted'))
# ### Algorithm 3: XGB (Extreme Gradient Boost Classifier)
# ##### Define a model
# In[116]:
clf_xgb = XGBClassifier()
# ##### Fit model on training data
# In[117]:
xgb_model = clf_xgb.fit(X_train, y_train)
# ##### Predicting on test data
# In[118]:
xgb_y_pred = xgb_model.predict(X_test)
# ##### Model evaluation metrics: Accuracy, Precision, Recall, F1 Score
# In[119]:
confusion_matrix(y_test,xgb_y_pred)
# In[120]:
print('Accuracy %s' % accuracy_score(xgb_y_pred, y_test))
print('Precision:', precision_score(xgb_y_pred, y_test, average='weighted'))
print('Recall:', recall_score(xgb_y_pred, y_test,average='weighted'))
print('F1 score:', f1_score(xgb_y_pred, y_test,average='weighted'))
# <br>
# ## Using an estimate of performance using an appropriate sample
# ### Model Performance Comparison
# In[121]:
models = {'SVM': [accuracy_score(svm_y_pred, y_test),
precision_score(svm_y_pred, y_test, average='weighted'),
recall_score(svm_y_pred, y_test, average='weighted'),
f1_score(svm_y_pred, y_test,average='weighted')],
'LGBM': [accuracy_score(lgbm_y_pred, y_test),
precision_score(lgbm_y_pred, y_test, average='weighted'),
recall_score(lgbm_y_pred, y_test,average='weighted'),
f1_score(lgbm_y_pred, y_test,average='weighted')],
'XGB': [accuracy_score(xgb_y_pred, y_test),
precision_score(xgb_y_pred, y_test, average='weighted'),
recall_score(xgb_y_pred, y_test,average='weighted'),
f1_score(xgb_y_pred, y_test,average='weighted')]
}
df_models = | pd.DataFrame(models, index=['Accuracy', 'Precision', 'Recall', 'F1-Score']) | pandas.DataFrame |
import pandas as pd
from pandas._testing import assert_frame_equal
import pytest
import numpy as np
from scripts.normalize_data import (
remove_whitespace_from_column_names,
normalize_expedition_section_cols,
remove_bracket_text,
remove_whitespace,
ddm2dec,
remove_empty_unnamed_columns,
normalize_columns
)
class RemoveSpacesFromColumns:
def test_replaces_leading_and_trailing_spaces_from_columns(self):
df = pd.DataFrame(columns=[' Aa', 'Bb12 ', ' Cc', 'Dd ', ' Ed Ed ', ' 12 ' ])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb12', 'Cc', 'Dd', 'Ee Ee', '12']
def test_returns_columns_if_no_leading_and_trailing_spaces(self):
df = pd.DataFrame(columns=['Aa', 'Bb', 'Cc', 'Dd', 'Ed Ed'])
res = remove_whitespace_from_column_names(df)
assert res == ['Aa', 'Bb', 'Cc', 'Dd', 'Ee Ee' ]
class TestNormalizeExpeditionSectionCols:
def test_dataframe_does_not_change_if_expection_section_columns_exist(self):
data = {
"Col": [0, 1],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Sample_exist(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_dataframe_does_not_change_if_expection_section_Label_exist(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expection_section_using_Label(self):
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Label ID": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_adds_missing_expection_section_using_Sample(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
}
df = pd.DataFrame(data)
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3-a", "10-U2H-20T-3-A"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
"A/W": ["a", "A"],
}
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_missing_aw_col(self):
data = {
"Col": [0, 1],
"Sample": ["1-U1h-2t-3", "10-U2H-20T-3"],
"Exp": ["1", "10"],
"Site": ["U1", "U2"],
"Hole": ["h", "H"],
"Core": ["2", "20"],
"Type": ["t", "T"],
"Section": ["3", "3"],
}
df = pd.DataFrame(data)
expected = pd.DataFrame(data)
df = normalize_expedition_section_cols(df)
assert_frame_equal(df, expected)
def test_handles_no_data(self):
data = {
"Col": [0],
"Sample": ["No data this hole"],
}
df = | pd.DataFrame(data) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.integrate import odeint
def append_df(df, ret, t, nivel_isolamento):
"""
Append the dataframe
:param df: dataframe to be appended
:param ret: solution of the SEIR
:param t: time to append
:param nivel_isolamento: string "without isolation" and "elderly isolation"
:return: df appended
"""
(Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj,
WARD_excess_i, WARD_excess_j, Ui, Uj, ICU_excess_i, ICU_excess_j, Mi, Mj,
pHi, pHj, pUi, pUj, pMi, pMj,
WARD_survive_i, WARD_survive_j,
WARD_death_i, WARD_death_j,
ICU_survive_i, ICU_survive_j,
ICU_death_i, ICU_death_j,
WARD_discharged_ICU_survive_i,
WARD_discharged_ICU_survive_j) = ret.T
df = df.append(pd.DataFrame({'Si': Si, 'Sj': Sj, 'Ei': Ei, 'Ej': Ej,
'Ii': Ii, 'Ij': Ij, 'Ri': Ri, 'Rj': Rj,
'Hi': Hi, 'Hj': Hj,
'WARD_excess_i': WARD_excess_i, 'WARD_excess_j': WARD_excess_j, 'Ui': Ui, 'Uj': Uj,
'ICU_excess_i': ICU_excess_i, 'ICU_excess_j': ICU_excess_j, 'Mi': Mi, 'Mj': Mj,
'pHi': pHi, 'pHj': pHj, 'pUi': pUi, 'pUj': pUj,
'pMi': pMi, 'pMj': pMj,
'WARD_survive_i': WARD_survive_i, 'WARD_survive_j': WARD_survive_j,
'WARD_death_i': WARD_death_i,'WARD_death_j': WARD_death_j,
'ICU_survive_i':ICU_survive_i,'ICU_survive_j': ICU_survive_j,
'ICU_death_i' : ICU_death_i,'ICU_death_j': ICU_death_j,
'WARD_discharged_ICU_survive_i': WARD_discharged_ICU_survive_i,
'WARD_discharged_ICU_survive_j':WARD_discharged_ICU_survive_j },
index=t)
.assign(isolamento=nivel_isolamento))
return df
def run_SEIR_ODE_model(covid_parameters, model_parameters) -> pd.DataFrame:
"""
Runs the simulation
:param covid_parameters:
:param model_parameters:
:return: DF_list
pd.DataFrame with results for SINGLE RUN
list of pd.DataFrame for SENSITIVITY ANALYSIS AND CONFIDENCE INTERVAL
"""
cp = covid_parameters
mp = model_parameters
# Variaveis apresentadas em base diaria
# A grid of time points (in days)
t = range(mp.t_max)
# CONDICOES INICIAIS
# Initial conditions vector
SEIRHUM_0_0 = initial_conditions(mp)
niveis_isolamento = mp.isolation_level # ["without_isolation", "elderly_isolation"]
if mp.IC_analysis == 4: # mp.analysis == 'Rt'
df_rt_city = mp.df_rt_city
runs = len(cp.alpha)
print('Rodando ' + str(runs) + ' casos')
print('Para ' + str(mp.t_max) + ' dias')
print('Para cada um dos ' + str(len(niveis_isolamento))
+ ' niveis de isolamento de entrada')
print('')
aNumber = 180 # TODO: check 180 or comment
tNumber = mp.t_max // aNumber
tNumberEnd = mp.t_max % aNumber
if tNumberEnd != 0:
aNumber += 1
else:
tNumberEnd = tNumber
DF_list = list() # list of data frames
for ii in range(runs): # sweeps the data frames list
df = pd.DataFrame()
for i in range(len(niveis_isolamento)):
# 1: without; 2: vertical
# Integrate the SEIR equations over the time grid, t
# PARAMETROS PARA CALCULAR DERIVADAS
args = args_assignment(cp, mp, i, ii)
argslist = list(args)
SEIRHUM_0 = SEIRHUM_0_0
t = range(tNumber)
ret = odeint(derivSEIRHUM, SEIRHUM_0, t, args)
(Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj,
WARD_excess_i, WARD_excess_j, Ui, Uj, ICU_excess_i, ICU_excess_j, Mi, Mj,
pHi, pHj, pUi, pUj, pMi, pMj,
WARD_survive_i, WARD_survive_j,
WARD_death_i, WARD_death_j,
ICU_survive_i, ICU_survive_j,
ICU_death_i, ICU_death_j,
WARD_discharged_ICU_survive_i,
WARD_discharged_ICU_survive_j) = ret.T
contador = 0
for a in range(aNumber):
if a == aNumber - 1:
t = range(tNumberEnd + 1)
else:
t = range(tNumber + 1)
SEIRHUM_0 = tuple([x[-1] for x in [Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj,
WARD_excess_i, WARD_excess_j, Ui, Uj, ICU_excess_i, ICU_excess_j, Mi, Mj,
pHi, pHj, pUi, pUj, pMi, pMj,
WARD_survive_i, WARD_survive_j,
WARD_death_i, WARD_death_j,
ICU_survive_i, ICU_survive_j,
ICU_death_i, ICU_death_j,
WARD_discharged_ICU_survive_i,
WARD_discharged_ICU_survive_j] ])
retTemp = odeint(derivSEIRHUM, SEIRHUM_0, t, args)
ret = retTemp[1:]
(Si, Sj, Ei, Ej, Ii, Ij, Ri, Rj, Hi, Hj,
WARD_excess_i, WARD_excess_j, Ui, Uj, ICU_excess_i, ICU_excess_j, Mi, Mj,
pHi, pHj, pUi, pUj, pMi, pMj,
WARD_survive_i, WARD_survive_j,
WARD_death_i, WARD_death_j,
ICU_survive_i, ICU_survive_j,
ICU_death_i, ICU_death_j,
WARD_discharged_ICU_survive_i,
WARD_discharged_ICU_survive_j) = ret.T
t = t[1:]
contador += 1
if a < mp.initial_deaths_to_fit:
# TODO: comentar por que 43 e -3
effectiver = df_rt_city.iloc[(contador + 43), -3] # np.random.random()/2 + 1
print(effectiver)
argslist[2] = (cp.gamma[ii] * effectiver * mp.population) / (Si[-1] + Sj[-1])
args = tuple(argslist)
elif a == mp.initial_deaths_to_fit:
# TODO: comentar por que 1.17
argslist[2] = (cp.gamma[ii] * 1.17 * mp.population) / (Si[-1] + Sj[-1])
else:
# print(argslist[2])
pass
df = append_df(df, ret, t, niveis_isolamento[i])
DF_list.append(df)
elif mp.IC_analysis == 2: # mp.analysis == 'Single Run'
ii = 1
df = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
dataset_dir = 'D:/Downloads/test'
csv_origin = './example.csv'
csv_unet = './unet.csv'
csv_submit = './rle_submit.csv'
def generate_final_csv(df_with_ship):
print("最终提交版本 : %d instances, %d images" %(df_with_ship.shape[0], len(get_im_list(df_with_ship))))
im_no_ship = get_im_no_ship(df_with_ship)
# write dataframe into .csv file
df_empty = pd.DataFrame({'ImageId':im_no_ship, 'EncodedPixels':get_empty_list(len(im_no_ship))})
df_submit = pd.concat([df_with_ship, df_empty], sort=False)
df_submit.drop(['area','confidence'], axis=1, inplace=True)
df_submit.to_csv(csv_submit, index=False,sep=str(',')) # str(',') is needed
print('Generate successfully!')
def get_im_no_ship(df_with_ship):
im_all = os.listdir(dataset_dir)
im_no_ship = list(set(im_all).difference(set(df_with_ship['ImageId'].tolist())))
return im_no_ship
def get_empty_list(length):
list_empty = []
for _ in range(length):
list_empty.append('')
return list_empty
def get_im_list(df):
df_with_ship = df[df['EncodedPixels'].notnull()]['ImageId']
return list(set(df_with_ship))
if __name__ == '__main__':
# 第一步:筛选detectron检测结果,主要根据阈值、面积
df = | pd.read_csv(csv_origin) | pandas.read_csv |
import pandas as pd
from pandas import DataFrame
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import f_regression
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR, LinearSVR
from metalfi.src.data.dataset import Dataset
from metalfi.src.data.memory import Memory
from metalfi.src.data.metadataset import MetaDataset
from metalfi.src.model.evaluation import Evaluation
from metalfi.src.model.featureselection import MetaFeatureSelection
from metalfi.src.model.metamodel import MetaModel
class Controller:
def __init__(self):
self.__train_data = None
self.__data_names = None
self.__meta_data = list()
self.fetchData()
self.storeMetaData()
self.__targets = ["linSVC_SHAP", "LOG_SHAP", "RF_SHAP", "NB_SHAP", "SVC_SHAP",
"linSVC_LIME", "LOG_LIME", "RF_LIME", "NB_LIME", "SVC_LIME",
"linSVC_PIMP", "LOG_PIMP", "RF_PIMP", "NB_PIMP", "SVC_PIMP",
"linSVC_LOFO", "LOG_LOFO", "RF_LOFO", "NB_LOFO", "SVC_LOFO"]
self.__meta_models = [(RandomForestRegressor(n_estimators=100, n_jobs=4), "RF"),
(SVR(), "SVR"),
(LinearRegression(n_jobs=4), "LIN"),
(LinearSVR(dual=True, max_iter=10000), "linSVR")]
def getTrainData(self):
return self.__train_data
def fetchData(self):
data_frame, target = Memory.loadTitanic()
data_1 = Dataset(data_frame, target)
data_frame_2, target_2 = Memory.loadCancer()
data_2 = Dataset(data_frame_2, target_2)
data_frame_3, target_3 = Memory.loadIris()
data_3 = Dataset(data_frame_3, target_3)
data_frame_4, target_4 = Memory.loadWine()
data_4 = Dataset(data_frame_4, target_4)
data_frame_5, target_5 = Memory.loadBoston()
data_5 = Dataset(data_frame_5, target_5)
open_ml = [(Dataset(data_frame, target), name) for data_frame, name, target in Memory.loadOpenML()]
self.__train_data = [(data_1, "Titanic"), (data_2, "Cancer"), (data_3, "Iris"), (data_4, "Wine"),
(data_5, "Boston")] + open_ml
self.__data_names = dict({})
i = 0
for data, name in self.__train_data:
self.__data_names[name] = i
i += 1
def storeMetaData(self):
for dataset, name in self.__train_data:
if not (Memory.getPath() / ("input/" + name + "meta.csv")).is_file():
print("meta-data calc.: " + name)
meta = MetaDataset(dataset, True)
data = meta.getMetaData()
d_times, t_times = meta.getTimes()
nr_feat, nr_inst = meta.getNrs()
Memory.storeInput(data, name)
Memory.storeDataFrame(DataFrame(data=d_times, index=["Time"], columns=[x for x in d_times]),
name + "XmetaX" + str(nr_feat) + "X" + str(nr_inst), "runtime")
Memory.storeDataFrame(DataFrame(data=t_times, index=["Time"], columns=[x for x in t_times]),
name + "XtargetX" + str(nr_feat) + "X" + str(nr_inst), "runtime")
def loadMetaData(self):
for dataset, name in self.__train_data:
sc = StandardScaler()
data = Memory.load(name + "meta.csv", "input")
fmf = [x for x in data.columns if "." not in x]
dmf = [x for x in data.columns if "." in x]
X_f = DataFrame(data=sc.fit_transform(data[fmf]), columns=fmf)
X_d = DataFrame(data=data[dmf], columns=dmf)
data_frame = | pd.concat([X_d, X_f], axis=1) | pandas.concat |
# installed
import pandas as pd
import numpy as np
import talib
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import ParameterGrid
import matplotlib.pyplot as plt
# custom
import data_processing as dp
def load_stocks_calculate_short_corr():
dfs, sh_int, fin_sh = dp.load_stocks(stocks=None)
latest_stocks = []
all_sh_stocks = []
all_sh_stocks_full = []
latest_date = sh_int['SPY'].index[-1]
for s in sh_int.keys():
if sh_int[s].shape[0] == 0:
print(s, 'is empty')
continue
if latest_date != sh_int[s].index[-1]:
print(s, 'is old')
continue
print(s)
df = sh_int[s].copy()
df['5d_price_change'] = df['Adj_Close'].pct_change(5).shift(-5)
df['10d_price_change'] = df['Adj_Close'].pct_change(10).shift(-10)
df['20d_price_change'] = df['Adj_Close'].pct_change(20).shift(-20)
df['40d_price_change'] = df['Adj_Close'].pct_change(40).shift(-40)
df['ticker'] = s
# create short-close correlations -- need to deal with -1s
# if short % is all -1 or 0, won't work. if less than 20 samples, rolling corr with 20 period window won't work
# also broke on DF with 22 samples
if df['Short_%_of_Float'].mean() in [-1, 0] or df.shape[0] < 30:
df['Short_%_of_Float_10d_EMA'] = -np.inf
df['Adj_Close_10d_EMA'] = talib.EMA(df['Adj_Close'].values, timeperiod=10)
df['short_close_corr_10d_EMA'] = -np.inf
df['short_close_corr_rocr_20d'] = -np.inf
df['short_%_rocr_20d'] = -np.inf
else:
df['Short_%_of_Float_10d_EMA'] = talib.EMA(df['Short_%_of_Float'].values, timeperiod=10)
df['Adj_Close_10d_EMA'] = talib.EMA(df['Adj_Close'].values, timeperiod=10)
# essentially, we want to take an arbitrary number of points, calculate correlation, and find where the correlations are largest
# take 10 points at a time and get correlations first, then take parts that have largest correlations, and keep expanding by 5 points at a time until correlation decreases
corr = df[['Short_%_of_Float_10d_EMA', 'Adj_Close_10d_EMA']].rolling(window=20).corr()
df['short_close_corr_10d_EMA'] = corr.unstack(1)['Short_%_of_Float_10d_EMA']['Adj_Close_10d_EMA']
df['short_close_corr_10d_EMA'].replace(np.inf, 1, inplace=True)
df['short_close_corr_10d_EMA'].replace(-np.inf, -1, inplace=True)
df['short_close_corr_10d_EMA'].clip(lower=-1, upper=1, inplace=True)
# WARNING: things with small (< 1%) Short % of float will result in huge rocr...maybe do something about this
df['short_close_corr_rocr_20d'] = talib.ROCR100(df['short_close_corr_10d_EMA'].values, timeperiod=20)
df['short_%_rocr_20d'] = talib.ROCR100(df['Short_%_of_Float_10d_EMA'].values, timeperiod=20)
# auto-detect long stretches of negative and positive correlation
thresh = 0.7
rolling = df['short_close_corr_10d_EMA'].rolling(window=20).min()
df['Short_%_positive_corr_detection'] = rolling > thresh
df['Short_%_positive_corr_detection'] = df['Short_%_positive_corr_detection'].astype('int16')
# sh_int[ticker]['Short_%_positive_corr_detection'].plot()
# plt.show()
df['Short_%_negative_corr_detection'] = rolling < -thresh
df['Short_%_negative_corr_detection'] = df['Short_%_negative_corr_detection'].astype('int16')
latest_stocks.append(df.iloc[-1])
all_sh_stocks_full.append(df)
all_sh_stocks.append(df.dropna())
latest_stocks_df = pd.concat(latest_stocks, axis=1).T
latest_stocks_df.set_index('ticker', inplace=True)
all_sh_stocks_df = | pd.concat(all_sh_stocks) | pandas.concat |
"""
The SamplesFrame class is an extended Pandas DataFrame, offering additional methods
for validation of hydrochemical data, calculation of relevant ratios and classifications.
"""
import logging
import numpy as np
import pandas as pd
from phreeqpython import PhreeqPython
from hgc.constants import constants
from hgc.constants.constants import mw
@pd.api.extensions.register_dataframe_accessor("hgc")
class SamplesFrame(object):
"""
DataFrame with additional hydrochemistry-specific methods.
All HGC methods and attributes defined in this class are available
in the namespace 'hgc' of the Dataframe.
Examples
--------
To use HGC methods, we always start from a Pandas DataFrame::
import pandas as pd
import hgc
# We start off with an ordinary DataFrame
df = pd.DataFrame({'Cl': [1,2,3], 'Mg': [11,12,13]})
# Since we imported hgc, the HGC-methods become available
# on the DataFrame. This allows for instance to use HGC's
# validation function
df.hgc.is_valid
False
df.hgc.make_valid()
"""
def __init__(self, pandas_obj):
self._obj = pandas_obj
self._pp = PhreeqPython() # bind 1 phreeqpython instance to the dataframe
self._valid_atoms = constants.atoms
self._valid_ions = constants.ions
self._valid_properties = constants.properties
@staticmethod
def _clean_up_phreeqpython_solutions(solutions):
"""
This is a convenience function that removes all
the phreeqpython solution in `solutions` from
memory.
Parameters
----------
solutions : list
python list containing of phreeqpython solutions
"""
_ = [s.forget() for s in solutions]
def _check_validity(self, verbose=True):
"""
Check if the dataframe is a valid HGC dataframe
Notes
-----
Checks are:
1. Are there any columns names in the recognized parameter set?
2. Are there no strings in the recognized columns (except '<' and '>')?
3. Are there negative concentrations in the recognized columns?
"""
obj = self._obj
if verbose:
logging.info("Checking validity of DataFrame for HGC...")
# Define allowed columns that contain concentration values
allowed_concentration_columns = (list(constants.atoms.keys()) +
list(constants.ions.keys()))
hgc_cols = self.hgc_cols
neg_conc_cols = []
invalid_str_cols = []
# Check the columns for (in)valid values
for col in hgc_cols:
# check for only numeric values
if obj[col].dtype in ('object', 'str'):
if not all(obj[col].str.isnumeric()):
invalid_str_cols.append(col)
# check for non-negative concentrations
elif (col in allowed_concentration_columns) and (any(obj[col] < 0)):
neg_conc_cols.append(col)
is_valid = ((len(hgc_cols) > 0) and (len(neg_conc_cols) == 0) and (len(invalid_str_cols) == 0))
if verbose:
logging.info(f"DataFrame contains {len(hgc_cols)} HGC-columns")
if len(hgc_cols) > 0:
logging.info(f"Recognized HGC columns are: {','.join(hgc_cols)}")
logging.info(f'These columns of the dataframe are not used by HGC: {set(obj.columns)-set(hgc_cols)}')
logging.info(f"DataFrame contains {len(neg_conc_cols)} HGC-columns with negative concentrations")
if len(neg_conc_cols) > 0:
logging.info(f"Columns with negative concentrations are: {','.join(neg_conc_cols)}")
logging.info(f"DataFrame contains {len(invalid_str_cols)} HGC-columns with invalid values")
if len(invalid_str_cols) > 0:
logging.info(f"Columns with invalid strings are: {','.join(invalid_str_cols)}. Only '<' and '>' and numeric values are allowed.")
if is_valid:
logging.info("DataFrame is valid")
else:
logging.info("DataFrame is not HGC valid. Use the 'make_valid' method to automatically resolve issues")
return is_valid
@property
def allowed_hgc_columns(self):
""" Returns allowed columns of the hgc SamplesFrame"""
return (list(constants.atoms.keys()) +
list(constants.ions.keys()) +
list(constants.properties.keys()))
@property
def hgc_cols(self):
""" Return the columns that are used by hgc """
return [item for item in self.allowed_hgc_columns if item in self._obj.columns]
@property
def is_valid(self):
""" returns a boolean indicating that the columns used by hgc have
valid values """
is_valid = self._check_validity(verbose=False)
return is_valid
def _make_input_df(self, cols_req):
"""
Make input DataFrame for calculations. This DataFrame contains columns for each required parameter,
which is 0 in case the parameter is not present in original HGC frame. It also
replaces all NaN with 0.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
df_in = pd.DataFrame(columns=cols_req)
for col_req in cols_req:
if col_req in self._obj:
df_in[col_req] = self._obj[col_req]
else:
logging.info(f"Column {col_req} is not present in DataFrame, assuming concentration 0 for this compound for now.")
df_in = df_in.fillna(0.0)
return df_in
def _replace_detection_lim(self, rule="half"):
"""
Substitute detection limits according to one of the available
rules. Cells that contain for example '<0.3' or '> 0.3' will be replaced
with 0.15 and 0.45 respectively (in case of rule "half").
Parameters
----------
rule : str, default 'half'
            Can be either "half" or "on". Rule "half" replaces cells below the detection limit with
            half the detection limit value and cells above it with 1.5 times that value.
            Rule "on" replaces detection limit cells with the exact value of the detection limit.
"""
for col in self.hgc_cols:
if self._obj[col].dtype in ('object', 'str'):
is_below_dl = self._obj[col].str.contains(pat=r'^[<]\s*\d').fillna(False)
is_above_dl = self._obj[col].str.contains(pat=r'^[>]\s*\d').fillna(False)
                dl_regex = r'(\d+(?:\.\d+)?)'  # also capture decimals, e.g. '<0.3' -> 0.3
                if rule == 'half':
                    self._obj.loc[is_below_dl, col] = self._obj.loc[is_below_dl, col].str.extract(dl_regex, expand=False).astype(np.float64) / 2
                    self._obj.loc[is_above_dl, col] = 1.5 * self._obj.loc[is_above_dl, col].str.extract(dl_regex, expand=False).astype(np.float64)
                elif rule == 'on':
                    is_dl = is_below_dl | is_above_dl
                    self._obj.loc[is_dl, col] = self._obj.loc[is_dl, col].str.extract(dl_regex, expand=False).astype(np.float64)
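    # A quick, illustrative sketch of the detection-limit rules (made-up values;
    # make_valid() applies rule='half' internally):
    #
    #   df = pd.DataFrame({'Fe': ['<0.3', '1.2', '> 0.3']})
    #   df.hgc.make_valid()
    #   # rule='half': '<0.3' -> 0.15 and '> 0.3' -> 0.45; '1.2' is simply cast to float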
def _replace_negative_concentrations(self):
"""
Replace any negative concentrations with 0.
"""
# Get all columns that represent chemical compounds
# Replace negatives with 0
for col in self.hgc_cols:
self._obj.loc[self._obj[col] < 0, col] = 0
def _cast_datatypes(self):
"""
Convert all HGC-columns to their correct data type.
"""
for col in self.hgc_cols:
if self._obj[col].dtype in ('object', 'str'):
self._obj[col] = pd.to_numeric(self._obj[col], errors='coerce')
def consolidate(self, use_ph='field', use_ec='lab', use_so4='ic', use_o2='field',
use_temp='field', use_alkalinity='alkalinity',
merge_on_na=False, inplace=True):
"""
Consolidate parameters measured with different methods to one single parameter.
Parameters such as EC and pH are frequently measured both in the lab and field,
and SO4 and PO4 are frequently measured both by IC and ICP-OES. Normally we prefer the
field data for EC and pH, but ill calibrated sensors or tough field circumstances may
prevent these readings to be superior to the lab measurement. This method allows for quick
selection of the preferred measurement method for each parameter and select that for further analysis.
For each consolidated parameter HGC adds a new column that is either filled with the lab measurements or the field
measurements. It is also possible to fill it with the preferred method, and fill remaining NaN's with
measurements gathered with the other possible method.
Parameters
----------
use_ph : {'lab', 'field', None}, default 'field'
Which pH to use? Ignored if None.
use_ec : {'lab', 'field', None}, default 'lab'
Which EC to use?
use_so4 : {'ic', 'field', None}, default 'ic'
Which SO4 to use?
        use_o2 : {'lab', 'field', None}, default 'field'
            Which O2 to use?
        use_temp : {'lab', 'field', None}, default 'field'
            Which temperature to use?
use_alkalinity: str, default 'alkalinity'
name of the column to use for alkalinity
merge_on_na : bool, default False
Fill NaN's from one measurement method with measurements from other method.
inplace : bool, default True
Modify SamplesFrame in place. inplace=False is not allowed
Raises
------
ValueError: if one of the `use_` parameters is set to a column that is not in the dataframe
*or* if one of the default parameters is not in the dataframe while it is not
set to None.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
if inplace is False:
raise NotImplementedError('inplace=False is not (yet) implemented.')
param_mapping = {
'ph': use_ph,
'ec': use_ec,
'SO4': use_so4,
'O2': use_o2,
'temp': use_temp,
}
if not (use_alkalinity in ['alkalinity', None]):
try:
self._obj['alkalinity'] = self._obj[use_alkalinity]
self._obj.drop(columns=[use_alkalinity], inplace=True)
except KeyError:
raise ValueError(f"Invalid value for argument 'use_alkalinity': " +
f"{use_alkalinity}. It is not a column name of " +
f"the dataframe")
for param, method in param_mapping.items():
if not method:
# user did not specify source, ignore
continue
if not isinstance(method, str):
raise ValueError(f"Invalid method {method} for parameter {param}. Arg should be a string.")
if param in self._obj.columns:
logging.info(f"Parameter {param} already present in DataFrame, ignoring. Remove column manually to enable consolidation.")
continue
source = f"{param}_{method}"
if source in self._obj.columns:
source_val = self._obj[source]
if any(np.isnan(source_val)):
                    raise ValueError(f"Column {source} contains NaN values")
self._obj[param] = np.NaN
self._obj[param].fillna(source_val, inplace=True)
if merge_on_na:
raise NotImplementedError('merge_on_na is True is not implemented (yet).')
# Drop source columns
suffixes = ('_field', '_lab', '_ic')
cols = [param + suffix for suffix in suffixes]
self._obj.drop(columns=cols, inplace=True, errors='ignore')
else:
raise ValueError(f"Column {source} not present in DataFrame. Use " +
f"use_{param.lower()}=None to explicitly ignore consolidating " +
f"this column.")
def get_bex(self, watertype="G", inplace=True):
"""
Get Base Exchange Index (meq/L). By default this is the BEX without dolomite.
Parameters
----------
watertype : {'G', 'P'}, default 'G'
Watertype (Groundwater or Precipitation)
Returns
-------
pandas.Series
            Series with the BEX (meq/L) for each row in the original SamplesFrame
            (only returned when inplace=False; otherwise stored in the column 'bex').
"""
cols_req = ('Na', 'K', 'Mg', 'Cl')
df = self._make_input_df(cols_req)
df_out = pd.DataFrame()
#TODO: calculate alphas on the fly from SMOW constants
alpha_na = 0.556425145165362 # ratio of Na to Cl in SMOW
alpha_k = 0.0206 # ratio of K to Cl in SMOW
alpha_mg = 0.0667508204998738 # ratio of Mg to Cl in SMOW
only_p_and_t = True
if watertype == "P" and only_p_and_t:
df_out['Na_nonmarine'] = df['Na'] - 1.7972 * alpha_na*df['Na']
df_out['K_nonmarine'] = df['K'] - 1.7972 * alpha_k*df['Na']
df_out['Mg_nonmarine'] = df['Mg'] - 1.7972 * alpha_mg*df['Na']
else:
df_out['Na_nonmarine'] = df['Na'] - alpha_na*df['Cl']
df_out['K_nonmarine'] = df['K'] - alpha_k*df['Cl']
df_out['Mg_nonmarine'] = df['Mg'] - alpha_mg*df['Cl']
df_out['bex'] = df_out['Na_nonmarine']/22.99 + df_out['K_nonmarine']/39.098 + df_out['Mg_nonmarine']/12.153
if inplace:
self._obj['bex'] = df_out['bex']
else:
return df_out['bex']
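    # Illustrative get_bex() usage (made-up concentrations in mg/L):
    #
    #   df = pd.DataFrame({'Na': [60.], 'K': [3.], 'Mg': [10.], 'Cl': [100.]})
    #   df.hgc.make_valid()
    #   df.hgc.get_bex()    # adds a 'bex' column in meq/L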
def get_ratios(self, inplace=True):
"""
Calculate common hydrochemical ratios, will ignore any ratios
in case their constituents are not present in the SamplesFrame.
It is assumed that only HCO<sub>3</sub><sup>-</sup> contributes to
the alkalinity.
Notes
-----
HGC will attempt to calculate the following ratios:
* Cl/Br
* Cl/Na
* Cl/Mg
* Ca/Sr
* Fe/Mn
* HCO3/Ca
* 2H/18O
* SUVA: UVA254/DOC
* HCO3/Sum of anions
* HCO3/Sum of Ca and Mg
* MONC
* COD/DOC
Returns
-------
pandas.DataFrame
DataFrame with computed ratios.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
df_ratios = pd.DataFrame()
ratios = {
'cl_to_br': ['Cl', 'Br'],
'cl_to_na': ['Cl', 'Na'],
            'cl_to_mg': ['Cl', 'Mg'],
'ca_to_sr': ['Ca', 'Sr'],
'fe_to_mn': ['Fe', 'Mn'],
'hco3_to_ca': ['alkalinity', 'Ca'],
'2h_to_18o': ['2H', '18O'],
'suva': ['uva254', 'doc'],
'hco3_to_sum_anions': ['alkalinity', 'sum_anions'],
'hco3_to_ca_and_mg': ['alkalinity', 'Ca', 'Mg'],
'monc': ['cod', 'Fe', 'NO2', 'doc'],
'cod_to_doc': ['cod', 'Fe', 'NO2', 'doc']
}
for ratio, constituents in ratios.items():
has_cols = [const in self._obj.columns for const in constituents]
if all(has_cols):
if ratio == 'hco3_to_sum_anions':
df_ratios[ratio] = self._obj['alkalinity'] / self.get_sum_anions(inplace=False)
elif ratio == 'hco3_to_ca_and_mg':
df_ratios[ratio] = self._obj['alkalinity'] / (self._obj['Ca'] + self._obj['Mg'])
elif ratio == 'monc':
df_ratios[ratio] = 4 - 1.5 * (self._obj['cod'] - 0.143 * self._obj['Fe'] - 0.348 * self._obj['NO2']) / (3.95 * self._obj['doc'])
elif ratio == 'cod_to_doc':
df_ratios[ratio] = ((0.2532 * self._obj['cod'] - 0.143 * self._obj['Fe'] - 0.348 * self._obj['NO2']) / 32) / (self._obj['doc'] / 12)
else:
df_ratios[ratio] = self._obj[constituents[0]] / self._obj[constituents[1]]
else:
missing_cols = [i for (i, v) in zip(constituents, has_cols) if not v]
logging.info(f"Cannot calculate ratio {ratio} since columns {','.join(missing_cols)} are not present.")
if inplace:
self._obj[df_ratios.columns] = df_ratios
else:
return df_ratios
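    # Illustrative get_ratios() usage (made-up values); ratios whose constituents are
    # missing are skipped with a log message:
    #
    #   df = pd.DataFrame({'Cl': [100.], 'Br': [0.35], 'Na': [60.], 'Ca': [80.],
    #                      'Sr': [200.], 'alkalinity': [180.]})
    #   df.hgc.make_valid()
    #   ratios = df.hgc.get_ratios(inplace=False)
    #   # ratios contains e.g. 'cl_to_br', 'cl_to_na', 'ca_to_sr' and 'hco3_to_ca'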
def get_stuyfzand_water_type(self, inplace=True):
"""
Get Stuyfzand water type. This water type classification contains
5 components: Salinity, Alkalinity, Dominant Cation, Dominant Anion and Base Exchange Index.
This results in a classification such as for example 'F3CaMix+'.
It is assumed that only HCO<sub>3</sub><sup>-</sup> contributes to
the alkalinity.
Returns
-------
pandas.Series
Series with Stuyfzand water type of each row in original SamplesFrame.
"""
if not self.is_valid:
raise ValueError("Method can only be used on validated HGC frames, use 'make_valid' to validate")
# Create input dataframe containing all required columns
# Inherit column values from HGC frame, assume 0 if column
# is not present
cols_req = ('Al', 'Ba', 'Br', 'Ca', 'Cl', 'Co', 'Cu', 'doc', 'F', 'Fe', 'alkalinity', 'K', 'Li', 'Mg', 'Mn', 'Na', 'Ni', 'NH4', 'NO2', 'NO3', 'Pb', 'PO4', 'ph', 'SO4', 'Sr', 'Zn')
df_in = self._make_input_df(cols_req)
df_out = pd.DataFrame(index=df_in.index)
# Salinity
df_out['swt_s'] = 'G'
df_out.loc[df_in['Cl'] > 5, 'swt_s'] = 'g'
df_out.loc[df_in['Cl'] > 30, 'swt_s'] = 'F'
df_out.loc[df_in['Cl'] > 150, 'swt_s'] = 'f'
df_out.loc[df_in['Cl'] > 300, 'swt_s'] = 'B'
df_out.loc[df_in['Cl'] > 1000, 'swt_s'] = 'b'
df_out.loc[df_in['Cl'] > 10000, 'swt_s'] = 'S'
df_out.loc[df_in['Cl'] > 20000, 'swt_s'] = 'H'
#Alkalinity
df_out['swt_a'] = '*'
df_out.loc[df_in['alkalinity'] > 31, 'swt_a'] = '0'
df_out.loc[df_in['alkalinity'] > 61, 'swt_a'] = '1'
df_out.loc[df_in['alkalinity'] > 122, 'swt_a'] = '2'
df_out.loc[df_in['alkalinity'] > 244, 'swt_a'] = '3'
df_out.loc[df_in['alkalinity'] > 488, 'swt_a'] = '4'
df_out.loc[df_in['alkalinity'] > 976, 'swt_a'] = '5'
df_out.loc[df_in['alkalinity'] > 1953, 'swt_a'] = '6'
df_out.loc[df_in['alkalinity'] > 3905, 'swt_a'] = '7'
#Dominant cation
s_sum_cations = self.get_sum_cations(inplace=False)
df_out['swt_domcat'] = self._get_dominant_anions_of_df(df_in)
# Dominant anion
s_sum_anions = self.get_sum_anions(inplace=False)
cl_mmol = df_in.Cl/mw('Cl')
hco3_mmol = df_in.alkalinity/(mw('H') + mw('C') + 3*mw('O'))
no3_mmol = df_in.NO3/(mw('N') + 3*mw('O'))
so4_mmol = df_in.SO4/(mw('S') + 4*mw('O'))
# TODO: consider renaming doman to dom_an or dom_anion
is_doman_cl = (cl_mmol > s_sum_anions/2)
df_out.loc[is_doman_cl, 'swt_doman'] = "Cl"
is_doman_hco3 = ~is_doman_cl & (hco3_mmol > s_sum_anions/2)
df_out.loc[is_doman_hco3, 'swt_doman'] = "HCO3"
is_doman_so4_or_no3 = ~is_doman_cl & ~is_doman_hco3 & (2*so4_mmol + no3_mmol > s_sum_anions/2)
is_doman_so4 = (2*so4_mmol > no3_mmol)
df_out.loc[is_doman_so4_or_no3 & is_doman_so4, 'swt_doman'] = "SO4"
df_out.loc[is_doman_so4_or_no3 & ~is_doman_so4, 'swt_doman'] = "NO3"
is_mix = ~is_doman_cl & ~is_doman_hco3 & ~is_doman_so4_or_no3
df_out.loc[is_mix, 'swt_doman'] = "Mix"
# Base Exchange Index
s_bex = self.get_bex(inplace=False)
threshold1 = 0.5 + 0.02*cl_mmol
threshold2 = -0.5-0.02*cl_mmol
is_plus = (s_bex > threshold1) & (s_bex > 1.5*(s_sum_cations-s_sum_anions))
is_minus = ~is_plus & (s_bex < threshold2) & (s_bex < 1.5*(s_sum_cations-s_sum_anions))
is_neutral = (~is_plus & ~is_minus &
(s_bex > threshold2) & (s_bex < threshold1) &
((s_sum_cations == s_sum_anions) |
((abs(s_bex + threshold1*(s_sum_cations-s_sum_anions))/abs(s_sum_cations-s_sum_anions))
> abs(1.5*(s_sum_cations-s_sum_anions)))
)
)
is_none = ~is_plus & ~is_minus & ~is_neutral
df_out.loc[is_plus, 'swt_bex'] = '+'
df_out.loc[is_minus, 'swt_bex'] = '-'
df_out.loc[is_neutral, 'swt_bex'] = 'o'
df_out.loc[is_none, 'swt_bex'] = ''
#Putting it all together
df_out['swt'] = df_out['swt_s'].str.cat(df_out[['swt_a', 'swt_domcat', 'swt_doman', 'swt_bex']])
if inplace:
self._obj['water_type'] = df_out['swt']
else:
return df_out['swt']
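    # Illustrative get_stuyfzand_water_type() usage (made-up sample, mg/L):
    #
    #   df = pd.DataFrame({'Na': [50.], 'K': [3.], 'Ca': [60.], 'Mg': [10.],
    #                      'Cl': [100.], 'SO4': [30.], 'NO3': [5.],
    #                      'alkalinity': [200.], 'ph': [7.2]})
    #   df.hgc.make_valid()
    #   df.hgc.get_stuyfzand_water_type(inplace=False)
    #   # returns a Series of codes such as 'F3CaMix+' (salinity, alkalinity class,
    #   # dominant cation, dominant anion, base exchange code)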
def _get_dominant_anions_of_df(self, df_in):
""" calculates the dominant anions of the dataframe df_in """
s_sum_cations = self.get_sum_cations(inplace=False)
cols_req = ('ph', 'Na', 'K', 'Ca', 'Mg', 'Fe', 'Mn', 'NH4', 'Al', 'Ba', 'Co', 'Cu', 'Li', 'Ni', 'Pb', 'Sr', 'Zn')
df_in = df_in.hgc._make_input_df(cols_req)
na_mmol = df_in.Na/mw('Na')
k_mmol = df_in.K/mw('K')
nh4_mmol = df_in.NH4/(mw('N')+4*mw('H'))
ca_mmol = df_in.Ca/mw('Ca')
mg_mmol = df_in.Mg/mw('Mg')
fe_mmol = df_in.Fe/mw('Fe')
mn_mmol = df_in.Mn/mw('Mn')
        h_mmol = 10**-(df_in.ph - 3.)  # pH -> mol/L -> mmol/L
        al_mmol = df_in.Al / 1000. / mw('Al')  # ug/L -> mg/L -> mmol/L
# - Na, K, NH4
# select rows that do not have Na, K or NH4 as dominant cation
is_no_domcat_na_nh4_k = (na_mmol + k_mmol + nh4_mmol) < (s_sum_cations/2)
is_domcat_nh4 = ~is_no_domcat_na_nh4_k & (nh4_mmol > (na_mmol + k_mmol))
is_domcat_na = ~is_no_domcat_na_nh4_k & ~is_domcat_nh4 & (na_mmol > k_mmol)
is_domcat_k = ~is_no_domcat_na_nh4_k & ~is_domcat_nh4 & ~is_domcat_na
# abbreviation
is_domcat_na_nh4_k = is_domcat_na | is_domcat_nh4 | is_domcat_k
# - Ca, Mg
is_domcat_ca_mg = (
# not na or nh4 or k dominant
~is_domcat_na_nh4_k & (
# should be any of Ca or Mg available
((ca_mmol > 0) | (mg_mmol > 0)) |
# should be more of Ca or Mg then sum of H, Fe, Al, Mn
# (compensated for charge)
(2*ca_mmol+2*mg_mmol < h_mmol+3*al_mmol+2*fe_mmol+2*mn_mmol)))
is_domcat_ca = is_domcat_ca_mg & (ca_mmol >= mg_mmol)
is_domcat_mg = is_domcat_ca_mg & (ca_mmol < mg_mmol)
# - H, Al, Fe, Mn
# IF(IF(h_mmol+3*IF(al_mmol)>2*(fe_mol+mn_mol),IF(h_mmol>3*al_mmol,"H","Al"),IF(fe_mol>mn_mol,"Fe","Mn")))
is_domcat_fe_mn_al_h = (
# not na, nh4, k, ca or Mg dominant
~is_domcat_na_nh4_k & ~is_domcat_ca & ~is_domcat_mg & (
# should be any of Fe, Mn, Al or H available
(fe_mmol > 0) | (mn_mmol > 0) | (h_mmol > 0) | (al_mmol > 0) # |
# # should be more of Ca or Mg then sum of H, Fe, Al, Mn
# # (compensated for charge)
# (2*ca_mmol+2*mg_mmol < h_mmol+3*al_mmol+2*fe_mmol+2*mn_mmol)
)
)
        is_domcat_h_al = is_domcat_fe_mn_al_h & ((h_mmol + 3*al_mmol) > (2*fe_mmol + 2*mn_mmol))
is_domcat_h = is_domcat_h_al & (h_mmol > al_mmol)
is_domcat_al = is_domcat_h_al & (al_mmol > h_mmol)
is_domcat_fe_mn = is_domcat_fe_mn_al_h & ~is_domcat_h_al
is_domcat_fe = is_domcat_fe_mn & (fe_mmol > mn_mmol)
is_domcat_mn = is_domcat_fe_mn & (mn_mmol > fe_mmol)
sr_out = pd.Series(index=df_in.index, dtype='object')
sr_out[:] = ""
sr_out[is_domcat_nh4] = "NH4"
sr_out[is_domcat_na] = "Na"
sr_out[is_domcat_k] = "K"
sr_out[is_domcat_ca] = 'Ca'
sr_out[is_domcat_mg] = 'Mg'
sr_out[is_domcat_fe] = 'Fe'
sr_out[is_domcat_mn] = 'Mn'
sr_out[is_domcat_al] = 'Al'
sr_out[is_domcat_h] = 'H'
return sr_out
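# Worked example of the cascade above (hypothetical values, in mmol/L):
# na=2.0, k=0.1, nh4=0.05 with s_sum_cations/2 = 1.0. The Na+K+NH4 group is
# not excluded (2.15 >= 1.0), NH4 is not dominant (0.05 <= 2.1), and Na > K,
# so the row is labelled "Na".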
def get_dominant_anions(self, inplace=True):
""" returns a series with the dominant anions."""
if inplace:
self._obj['dominant_anion'] = self._get_dominant_anions_of_df(self._obj)
else:
return self._get_dominant_anions_of_df(self._obj)
def fillna_concentrations(self, how="phreeqc"):
"""
Calculate missing concentrations based on the charge balance.
Parameters
----------
how : {'phreeqc', 'analytic'}, default 'phreeqc'
Method to compute missing concentrations.
"""
raise NotImplementedError()
def fillna_ec(self, use_phreeqc=True):
"""
Calculate missing Electrical Conductivity measurements using
known anions and cations.
"""
if use_phreeqc:
# use get_specific_conductance method on
# all N/A rows of EC columns
raise NotImplementedError()
else:
raise NotImplementedError()
def make_valid(self):
"""
Try to convert the DataFrame into a valid HGC-SamplesFrame.
"""
# Conduct conversions here. If they fail, raise error (e.g. when not a single valid column is present)
# Important: order is important, first convert strings to double, then replace negative concentrations
self._replace_detection_lim()
self._cast_datatypes()
self._replace_negative_concentrations()
self._check_validity(verbose=True)
def get_sum_anions(self, inplace=True):
"""
Calculate sum of anions according to the Stuyfzand method.
It is assumed that only HCO3- contributes to the alkalinity.
Returns
-------
pandas.Series
Series with the sum of anions for each row in the SamplesFrame.
"""
cols_req = ('Br', 'Cl', 'doc', 'F', 'alkalinity', 'NO2', 'NO3', 'PO4', 'SO4', 'ph')
df_in = self._make_input_df(cols_req)
s_sum_anions = pd.Series(index=df_in.index,dtype='float64')
k_org = 10**(0.039*df_in['ph']**2 - 0.9*df_in['ph']-0.96) # HGC manual equation 3.5
a_org = k_org * df_in['doc'] / (100*k_org + (10**-df_in['ph'])/10) # HGC manual equation 3.4A
sum_ions = (df_in['Cl']/35.453 + df_in['SO4']/48.03 +
df_in['alkalinity']/61.02 + df_in['NO3']/62. +
df_in['NO2']/46.0055 + df_in['F']/18.9984 +
df_in['Br']/79904 +
(df_in['PO4']/94.971) / (1 + 10**(df_in['ph']-7.21))
)
is_add_a_org = (a_org > df_in['alkalinity']/61.02)
s_sum_anions.loc[is_add_a_org] = sum_ions + a_org
s_sum_anions.loc[~is_add_a_org] = sum_ions
if inplace:
self._obj['sum_anions'] = s_sum_anions
else:
return s_sum_anions
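# Worked example of the DOC correction above (hypothetical sample): at ph=7,
# k_org = 10**(0.039*49 - 0.9*7 - 0.96) ~ 4.5e-6, so for doc = 5 mg/L
# a_org ~ 4.5e-6*5 / (100*4.5e-6 + 1e-7/10) ~ 0.05, which is smaller than
# alkalinity/61.02 ~ 1.97 for an alkalinity of 120 mg/L; the organic-anion
# term is therefore not added for that row.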
def get_sum_cations(self, inplace=True):
"""
Calculate sum of cations according to the Stuyfzand method.
Returns
-------
pandas.Series
Sum of all cations for each row in original SamplesFrame.
"""
cols_req = ('ph', 'Na', 'K', 'Ca', 'Mg', 'Fe', 'Mn', 'NH4', 'Al', 'Ba', 'Co', 'Cu', 'Li', 'Ni', 'Pb', 'Sr', 'Zn')
df_in = self._make_input_df(cols_req)
# Extra balancing term when Ca and Mg are not reported; a total hardness
# (H_tot) fallback is not available from cols_req, so it is kept at zero.
abac = 0
s_sum_cations = 10**-(df_in['ph']-3) + \
df_in['Na']/22.99 + \
df_in['K']/39.1 + \
df_in['Ca']/20.04 + \
df_in['Mg']/12.156 + \
df_in['Fe']/(55.847/2) + \
df_in['Mn']/(54.938/2) + \
df_in['NH4']/18.04 + \
df_in['Al']/(26982/3) + \
abac + \
df_in['Ba']/137327 + \
df_in['Co']/58933 + \
df_in['Cu']/(63546/2) + \
df_in['Li']/6941 + \
df_in['Ni']/58693 + \
df_in['Pb']/207200 + \
df_in['Sr']/87620 + \
df_in['Zn']/65380
if inplace:
self._obj['sum_cations'] = s_sum_cations
else:
return s_sum_cations
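# Usage sketch (illustrative): with the hgc accessor registered on a
# SamplesFrame `df`, the ion balance building blocks can be obtained as
#
#   anions = df.hgc.get_sum_anions(inplace=False)
#   cations = df.hgc.get_sum_cations(inplace=False)
#
# or stored in place as the 'sum_anions' / 'sum_cations' columns by keeping
# the default inplace=True.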
def get_phreeq_columns(self):
"""
Returns the columns from the DataFrame that might be used
by PhreeqPython.
Returns
-------
list
Usable PhreeqPython columns
"""
df = self._obj
bicarbonate_in_columns = any([('hco' in _c.lower()) or
('bicarbona' in _c.lower())
for _c in df.columns])
alkalinity_in_columns = any(['alkalinity' in _c.lower()
for _c in df.columns])
if bicarbonate_in_columns:
if alkalinity_in_columns:
logging.warning('Warning: both bicarbonate (or hco3) and alkalinity are ' +
'defined as columns. Note that only the alkalinity column is used')
else:
logging.warning('Warning: bicarbonate (or hco3) is found, but no alkalinity ' +
'column is defined. Note that the bicarbonate column is ignored; only an ' +
'alkalinity column is used')
atom_columns = set(self._valid_atoms).intersection(df.columns)
ion_columns = set(self._valid_ions).intersection(df.columns)
prop_columns = set(self._valid_properties).intersection(df.columns)
phreeq_columns = list(atom_columns.union(ion_columns).union(prop_columns))
nitrogen_cols = set(phreeq_columns).intersection({'NO2', 'NO3', 'N', 'N_tot_k'})
phosphor_cols = set(phreeq_columns).intersection({'PO4', 'P', 'P_ortho', 'PO4_total'})
# check whether ph and temp are in the list
if 'ph' not in phreeq_columns:
raise ValueError('The required column ph is missing in the dataframe. ' +
'Add a column ph manually or consolidate ph_lab or ph_field ' +
'to ph by running the method DataFrame.hgc.consolidate().')
if 'temp' not in phreeq_columns:
raise ValueError('The required column temp is missing in the dataframe. ' +
'Add a column temp manually or consolidate temp_lab or temp_field ' +
'to temp by running the method DataFrame.hgc.consolidate().')
if 'doc' in phreeq_columns:
logging.info('DOC column found in samples frame while using phreeqc as backend; influence of DOC on any value calculated using phreeqc is ignored.')
if len(nitrogen_cols) > 1:
# check if nitrogen is defined in more than one column (per sample)
duplicate_nitrogen = df[list(nitrogen_cols)].apply(lambda x: sum(x > 0) > 1, axis=1)
if sum(duplicate_nitrogen) > 0:
logging.info('Some rows have more than one column defining N. ' +
'Choosing N over NO2 over NO3')
for index, row in df.loc[duplicate_nitrogen].iterrows():
for col in ['N', 'NO2', 'NO3']:
if col in nitrogen_cols:
if row[col] > 0:
df.loc[index, list(nitrogen_cols-{col})] = 0.
break
if len(phosphor_cols) > 1:
# check if phosphor is defined in more than one column (per sample)
duplicate_phosphor = df[list(phosphor_cols)].apply(lambda x: sum(x > 0) > 1, axis=1)
if sum(duplicate_phosphor) > 0:
logging.info('Some rows have more than one column defining P. Choosing P over PO4')
for index, row in df.loc[duplicate_phosphor].iterrows():
for col in ['P', 'PO4']:
if col in phosphor_cols:
if row[col] > 0:
df.loc[index, list(phosphor_cols-{col})] = 0.
break
return phreeq_columns
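# Example of the de-duplication above (hypothetical row): if a sample reports
# N = 1.2 and NO3 = 0.5, the NO3 (and NO2) entries are set to 0 for that row,
# because N takes precedence over NO2 over NO3; the analogous rule applies to
# P over PO4.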
def get_phreeqpython_solutions(self, equilibrate_with='none', inplace=True):
"""
Return a series of `phreeqpython solutions <https://github.com/Vitens/phreeqpython>`_ derived from the (row)data in the SamplesFrame.
Parameters
----------
equilibrate_with : str, default 'none'
Ion to add for achieving charge equilibrium in the solutions.
inplace : bool, default True
Whether the series of solutions is stored in the SamplesFrame (True,
the default) or returned as a separate series (False).
Returns
-------
pandas.Series
"""
# `None` is also a valid argument and is translated to the string `'none'`
if equilibrate_with is None:
equilibrate_with = 'none'
pp = self._pp
df = self._obj.copy()
phreeq_cols = self.get_phreeq_columns()
solutions = pd.Series(index=df.index, dtype='object')
# -*- coding: utf-8 -*-
import os
import argparse
import datetime
import pandas as pd
from pyspark.sql import functions as f
from src.spark_session import spark
import setting
from src.utils import log_config, utils
logger = log_config.get_logger(__name__)
def ingest_raw_csv(raw_csv_filename=setting.nyc_raw_csv_filename,
storage_dir=setting.data_dir_raw,
cleanup=True, tip_amount_present=True):
"""
Ingests a raw NYC taxi CSV file into a pyspark dataframe.
:param raw_csv_filename: name of the raw CSV file to read
:param storage_dir: directory containing the raw CSV file
:param cleanup: if True, drop columns that are null for the entire dataset (currently ehail_fee)
:param tip_amount_present: whether the CSV contains a tip_amount column
:return: pyspark dataframe with typed pickup/dropoff timestamps
"""
nyc_raw_csv_filepath = os.path.join(storage_dir, raw_csv_filename)
logger.info("ingesting raw csv file from {}".format(nyc_raw_csv_filepath))
df_raw = spark.read.csv(path=nyc_raw_csv_filepath,
header=True, inferSchema=True)
if tip_amount_present:
df_col_names = [
'vendor_id', 'pickup_datetime', 'dropoff_datetime',
'store_and_fwd_flag', 'ratecode_id', 'pu_location_id',
'do_location_id', 'passenger_count', 'trip_distance',
'fare_amount', 'extra', 'mta_tax', 'tip_amount',
'tolls_amount', 'ehail_fee', 'improvement_surcharge',
'total_amount', 'payment_type', 'trip_type']
else:
df_col_names = [
'vendor_id', 'pickup_datetime', 'dropoff_datetime',
'store_and_fwd_flag', 'ratecode_id', 'pu_location_id',
'do_location_id', 'passenger_count', 'trip_distance',
'fare_amount', 'extra', 'mta_tax', 'tolls_amount',
'ehail_fee', 'improvement_surcharge', 'total_amount',
'payment_type', 'trip_type']
df_raw = df_raw.toDF(*df_col_names)
df_raw = df_raw.withColumn("pickup_datetime",
f.to_timestamp(df_raw.pickup_datetime,
'MM/dd/yyyy hh:mm:ss a'))
df_raw = df_raw.withColumn("dropoff_datetime",
f.to_timestamp(df_raw.dropoff_datetime,
'MM/dd/yyyy hh:mm:ss a'))
if cleanup:
# drop ehail fee, it is null in the entire dataset
df_raw = df_raw.drop('ehail_fee')
logger.info("ingested and cleaned raw csv file "
"{}".format(nyc_raw_csv_filepath))
return df_raw
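# Usage sketch (illustrative): ingest the raw CSV with the configured defaults
# and persist the train/test splits as parquet.
#
#   df_raw = ingest_raw_csv()
#   filter_and_persist_train_test_raw(df_raw)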
def filter_and_persist_train_test_raw(df_raw):
"""
Filters and saves the January and February datasets
:param df_raw: raw pyspark dataframe as returned by ingest_raw_csv
:return: None; writes the train and test parquet files to the interim data directory
"""
df_raw_train_filepath = os.path.join(setting.data_dir_interim,
setting.raw_train_filename)
df_raw_test_filepath = os.path.join(setting.data_dir_interim,
setting.raw_test_filename)
df_raw_train, df_raw_test = filter_train_test(df_raw=df_raw)
logger.info("writing raw train and test files to {} and "
"{}".format(df_raw_train_filepath, df_raw_test_filepath))
df_raw_train.write.parquet(path=df_raw_train_filepath, mode="overwrite")
df_raw_test.write.parquet(path=df_raw_test_filepath, mode="overwrite")
def filter_train_test(df_raw):
train_date_start = pd.to_datetime(setting.train_date_start)
train_date_cutoff = \
pd.to_datetime(setting.train_date_end) \
+ datetime.timedelta(days=1)
test_date_start = pd.to_datetime(setting.test_date_start)
test_date_cutoff = \
pd.to_datetime(setting.test_date_end) \
+ datetime.timedelta(days=1)
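# Minimal sketch of the rest of the split (an assumption): the caller above
# expects a (train, test) pair of dataframes, and filtering on
# pickup_datetime between the configured start dates and cutoffs is assumed.
df_raw_train = df_raw.filter(
    (f.col('pickup_datetime') >= train_date_start) &
    (f.col('pickup_datetime') < train_date_cutoff))
df_raw_test = df_raw.filter(
    (f.col('pickup_datetime') >= test_date_start) &
    (f.col('pickup_datetime') < test_date_cutoff))
return df_raw_train, df_raw_test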
#!/usr/bin/env python3
# add rgb shading value based on the relative abundances of all pb transcripts
# of a gene
# %%
import pandas as pd
import math
import argparse
# examine all pb transcripts of a gene, determine rgb color
def calculate_rgb_shading(grp):
"""
Examine CPM for all PB transcripts of a gene and get rgb shading factor.
"""
# rgb scaling
rgb_scale = [
'0,0,0', '26,0,0', '51,0,0', '77,0,0', '102,0,0',
'128,0,0', '153,0,0', '179,0,0', '204,0,0', '230,0,0',
'255,0,0', '255,26,26', '255,51,51', '255,77,77', '255,102,102',
'255,128,128', '255,153,153', '255,179,179', '255,204,204', '255,230,230']
max_cpm = grp.cpm.max()
out_df = pd.DataFrame(columns = ['acc_full', 'pb_acc', 'cpm', 'fc', 'log2fc', 'log2fcx3', 'ceil_idx', 'rgb'])
for i, row in grp.iterrows():
cpm = row['cpm']
fc = float(max_cpm) / float(cpm)
log2fc = math.log(fc, 2)
log2fcx3 = log2fc * 3
ceil_idx = math.ceil(log2fcx3)
if ceil_idx > 19:
ceil_idx = 19
rgb = rgb_scale[ceil_idx]
out_df = out_df.append({'acc_full': row['acc_full'],
'pb_acc': row['pb_acc'],
'cpm': row['cpm'],
'fc': fc,
'log2fc': log2fc,
'log2fcx3': log2fcx3,
'ceil_idx': ceil_idx,
'rgb': rgb}, ignore_index=True)
# comment out line below to return all intermediate values
out_df = out_df[['acc_full', 'pb_acc', 'cpm', 'fc', 'rgb']]
return out_df
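# Worked example of the shading arithmetic above (hypothetical values): with
# max_cpm = 100 and cpm = 25, fc = 4, log2fc = 2, log2fcx3 = 6, so ceil_idx = 6
# and the transcript is shaded rgb_scale[6] = '153,0,0'; the most abundant
# transcript (fc = 1) always maps to index 0, i.e. black '0,0,0'.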
def add_rgb_shading(name, bed_file):
"""
Reads a BED file containing CPM info to determine the rgb color to use for track visualization.
Parameters
----------
name : str
name of sample
bed_file : filename
path to the CDS BED file to read
"""
bed = pd.read_table(bed_file, header=None)
bed[['gene', 'pb_acc', 'cpm']] = bed[3].str.split('|', expand=True)
bed = bed[bed.gene != '-']
bed = bed.rename(columns={3: 'acc_full'})
# subset df to determine rgb shading
subbed = bed[['acc_full', 'gene', 'pb_acc', 'cpm']].copy()
subbed['cpm'] = subbed['cpm'].astype(str).astype(float)
shaded = subbed.groupby('gene').apply(calculate_rgb_shading).reset_index()
# include rgb into original bed12
shaded['cpm_int'] = shaded['cpm'].apply(lambda x: str(round(x)).split('.')[0])
shaded['new_acc_full'] = shaded['gene'] + '|' + shaded['pb_acc'] + '|' + shaded['cpm_int'].astype(str)
# join in the rgb data and new accession
bed_shaded = pd.merge(bed, shaded, how='left', on='acc_full')
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
<NAME> and <NAME>. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
<NAME> and <NAME>. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
<NAME> and <NAME> (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
<NAME>, <NAME> (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from __future__ import division
from statsmodels.compat.python import range, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
import patsy
from collections import defaultdict
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.model as base
# used for wrapper:
import statsmodels.regression.linear_model as lm
import statsmodels.base.wrapper as wrap
from statsmodels.genmod import families
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import cov_struct as cov_structs
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
DomainWarning,
IterationLimitWarning,
ValueWarning)
import warnings
from statsmodels.graphics._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc)
from statsmodels.discrete.discrete_margins import (
_get_margeff_exog, _check_margeff_args, _effects_at, margeff_cov_with_se,
_check_at_is_all, _transform_names, _check_discrete_args,
_get_dummy_index, _get_count_index)
class ParameterConstraint(object):
"""
A class for managing linear equality constraints for a parameter
vector.
"""
def __init__(self, lhs, rhs, exog):
"""
Parameters
----------
lhs : ndarray
A q x p matrix which is the left hand side of the
constraint lhs * param = rhs. The number of constraints is
q >= 1 and p is the dimension of the parameter vector.
rhs : ndarray
A 1-dimensional vector of length q which is the right hand
side of the constraint equation.
exog : ndarray
The n x p exogenous data for the full model.
"""
# In case a row or column vector is passed (patsy linear
# constraints passes a column vector).
rhs = np.atleast_1d(rhs.squeeze())
if rhs.ndim > 1:
raise ValueError("The right hand side of the constraint "
"must be a vector.")
if len(rhs) != lhs.shape[0]:
raise ValueError("The number of rows of the left hand "
"side constraint matrix L must equal "
"the length of the right hand side "
"constraint vector R.")
self.lhs = lhs
self.rhs = rhs
# The columns of lhs0 are an orthogonal basis for the
# orthogonal complement to row(lhs), the columns of lhs1 are
# an orthogonal basis for row(lhs). The columns of lhsf =
# [lhs0, lhs1] are mutually orthogonal.
lhs_u, lhs_s, lhs_vt = np.linalg.svd(lhs.T, full_matrices=1)
self.lhs0 = lhs_u[:, len(lhs_s):]
self.lhs1 = lhs_u[:, 0:len(lhs_s)]
self.lhsf = np.hstack((self.lhs0, self.lhs1))
# param0 is one solution to the underdetermined system
# L * param = R.
self.param0 = np.dot(self.lhs1, np.dot(lhs_vt, self.rhs) /
lhs_s)
self._offset_increment = np.dot(exog, self.param0)
self.orig_exog = exog
self.exog_fulltrans = np.dot(exog, self.lhsf)
def offset_increment(self):
"""
Returns a vector that should be added to the offset vector to
accommodate the constraint.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self._offset_increment
def reduced_exog(self):
"""
Returns a linearly transformed exog matrix whose columns span
the constrained model space.
Parameters
----------
exog : array-like
The exogeneous data for the model.
"""
return self.exog_fulltrans[:, 0:self.lhs0.shape[1]]
def restore_exog(self):
"""
Returns the full exog matrix before it was reduced to
satisfy the constraint.
"""
return self.orig_exog
def unpack_param(self, params):
"""
Converts the parameter vector `params` from reduced to full
coordinates.
"""
return self.param0 + np.dot(self.lhs0, params)
def unpack_cov(self, bcov):
"""
Converts the covariance matrix `bcov` from reduced to full
coordinates.
"""
return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
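# Illustrative sketch (hypothetical arrays): constraining the first two slopes
# of a GEE fit to be equal, i.e. params[0] - params[1] = 0, is done by passing
# constraint=(L, R) to the GEE constructor; the score test of the constraint
# is stored in model.score_test_results after fitting. Here endog, exog
# (three columns) and groups are placeholders for the user's data.
#
#   import numpy as np
#   L = np.array([[1., -1., 0.]])
#   R = np.array([0.])
#   model = GEE(endog, exog, groups, constraint=(L, R))
#   result = model.fit()
#   print(model.score_test_results)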
_gee_init_doc = """
Marginal regression model fit using Generalized Estimating Equations.
GEE can be used to fit Generalized Linear Models (GLMs) when the
data have a grouped structure, and the observations are possibly
correlated within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous values (i.e. responses, outcomes,
dependent variables, or 'Y' values).
exog : array-like
2d array of exogeneous values (i.e. covariates, predictors,
independent variables, regressors, or 'X' values). A `nobs x
k` array where `nobs` is the number of observations and `k` is
the number of regressors. An intercept is not included by
default and should be added by the user. See
`statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
%(family_doc)s
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.cov_struct.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
update_dep : bool
If true, the dependence parameters are optimized, otherwise
they are held fixed at their starting values.
weights : array-like
An array of weights to use in the analysis. The weights must
be constant within each group. These correspond to
probability weights (pweights) in Stata.
%(extra_params)s
See Also
--------
statsmodels.genmod.families.family
:ref:`families`
:ref:`links`
Notes
-----
Only the following combinations make sense for family and link ::
+              ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian     |  x     x                        x
inv Gaussian |  x     x                        x
binomial     |  x     x    x     x       x     x   x            x     x
Poisson      |  x     x                        x
neg binomial |  x     x                        x         x
gamma        |  x     x                        x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
The robust covariance provided here follows Liang and Zeger (1986)
and agrees with R's gee implementation. To obtain the robust
standard errors reported in Stata, multiply by sqrt(N / (N - g)),
where N is the total sample size, and g is the average group size.
Examples
--------
%(example)s
"""
_gee_family_doc = """\
The default is Gaussian. To specify the binomial
distribution use `family=sm.families.Binomial()`. Each family
can take a link instance as an argument. See
statsmodels.genmod.families.family for more information."""
_gee_ordinal_family_doc = """\
The only family supported is `Binomial`. The default `Logit`
link may be replaced with `probit` if desired."""
_gee_nominal_family_doc = """\
The default value `None` uses a multinomial logit family
specifically designed for use with GEE. Setting this
argument to a non-default value is not currently supported."""
_gee_fit_doc = """
Fits a marginal regression model using generalized estimating
equations (GEE).
Parameters
----------
maxiter : integer
The maximum number of iterations
ctol : float
The convergence criterion for stopping the Gauss-Seidel
iterations
start_params : array-like
A vector of starting values for the regression
coefficients. If None, a default is chosen.
params_niter : integer
The number of Gauss-Seidel updates of the mean structure
parameters that take place prior to each update of the
dependence structure.
first_dep_update : integer
No dependence structure updates occur before this
iteration number.
cov_type : string
One of "robust", "naive", or "bias_reduced".
ddof_scale : scalar or None
The scale parameter is estimated as the sum of squared
Pearson residuals divided by `N - ddof_scale`, where N
is the total sample size. If `ddof_scale` is None, the
number of covariates (including an intercept if present)
is used.
scaling_factor : scalar
The estimated covariance of the parameter estimates is
scaled by this value. Default is 1, Stata uses N / (N - g),
where N is the total sample size and g is the average group
size.
Returns
-------
An instance of the GEEResults class or subclass
Notes
-----
If convergence difficulties occur, increase the values of
`first_dep_update` and/or `params_niter`. Setting
`first_dep_update` to a greater value (e.g. ~10-20) causes the
algorithm to move close to the GLM solution before attempting
to identify the dependence structure.
For the Gaussian family, there is no benefit to setting
`params_niter` to a value greater than 1, since the mean
structure parameters converge in one step.
"""
_gee_results_doc = """
Attributes
----------
cov_params_default : ndarray
default covariance of the parameter estimates. Is chosen among one
of the following three based on `cov_type`
cov_robust : ndarray
covariance of the parameter estimates that is robust
cov_naive : ndarray
covariance of the parameter estimates that is not robust to
correlation or variance misspecification
cov_robust_bc : ndarray
covariance of the parameter estimates that is robust and bias
reduced
converged : bool
indicator for convergence of the optimization.
True if the norm of the score is smaller than a threshold
cov_type : string
string indicating whether a "robust", "naive" or "bias_reduced"
covariance is used as default
fit_history : dict
Contains information about the iterations.
fittedvalues : array
Linear predicted values for the fitted model.
dot(exog, params)
model : class instance
Pointer to GEE model instance that called `fit`.
normalized_cov_params : array
See GEE docstring
params : array
The coefficients of the fitted model. Note that
interpretation of the coefficients often depends on the
distribution family and the data.
scale : float
The estimate of the scale / dispersion for the model fit.
See GEE.fit for more information.
score_norm : float
norm of the score at the end of the iterative estimation.
bse : array
The standard errors of the fitted GEE parameters.
"""
_gee_example = """
Logistic regression with autoregressive working dependence:
>>> import statsmodels.api as sm
>>> family = sm.families.Binomial()
>>> va = sm.cov_struct.Autoregressive()
>>> model = sm.GEE(endog, exog, group, family=family, cov_struct=va)
>>> result = model.fit()
>>> print(result.summary())
Use formulas to fit a Poisson GLM with independent working
dependence:
>>> import statsmodels.api as sm
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = sm.GEE.from_formula("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
Equivalent, using the formula API:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> fam = sm.families.Poisson()
>>> ind = sm.cov_struct.Independence()
>>> model = smf.gee("y ~ age + trt + base", "subject", \
data, cov_struct=ind, family=fam)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_ordinal_example = """
Fit an ordinal regression model using GEE, with "global
odds ratio" dependence:
>>> import statsmodels.api as sm
>>> gor = sm.cov_struct.GlobalOddsRatio("ordinal")
>>> model = sm.OrdinalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.formula.api as smf
>>> model = smf.ordinal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
_gee_nominal_example = """
Fit a nominal regression model using GEE:
>>> import statsmodels.api as sm
>>> import statsmodels.formula.api as smf
>>> gor = sm.cov_struct.GlobalOddsRatio("nominal")
>>> model = sm.NominalGEE(endog, exog, groups, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using formulas:
>>> import statsmodels.api as sm
>>> model = sm.NominalGEE.from_formula("y ~ x1 + x2", groups,
data, cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
Using the formula API:
>>> import statsmodels.formula.api as smf
>>> model = smf.nominal_gee("y ~ x1 + x2", groups, data,
cov_struct=gor)
>>> result = model.fit()
>>> print(result.summary())
"""
def _check_args(endog, exog, groups, time, offset, exposure):
if endog.size != exog.shape[0]:
raise ValueError("Leading dimension of 'exog' should match "
"length of 'endog'")
if groups.size != endog.size:
raise ValueError("'groups' and 'endog' should have the same size")
if time is not None and (time.size != endog.size):
raise ValueError("'time' and 'endog' should have the same size")
if offset is not None and (offset.size != endog.size):
raise ValueError("'offset and 'endog' should have the same size")
if exposure is not None and (exposure.size != endog.size):
raise ValueError("'exposure' and 'endog' should have the same size")
class GEE(base.Model):
__doc__ = (
" Estimation of marginal regression models using Generalized\n"
" Estimating Equations (GEE).\n" + _gee_init_doc %
{'extra_params': base._missing_param_doc,
'family_doc': _gee_family_doc,
'example': _gee_example})
cached_means = None
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
exposure=None, dep_data=None, constraint=None,
update_dep=True, weights=None, **kwargs):
if family is not None:
if not isinstance(family.link, tuple(family.safe_links)):
import warnings
msg = ("The {0} link function does not respect the "
"domain of the {1} family.")
warnings.warn(msg.format(family.link.__class__.__name__,
family.__class__.__name__),
DomainWarning)
groups = np.asarray(groups) # in case groups is pandas
if "missing_idx" in kwargs and kwargs["missing_idx"] is not None:
# If here, we are entering from super.from_formula; missing
# has already been dropped from endog and exog, but not from
# the other variables.
ii = ~kwargs["missing_idx"]
groups = groups[ii]
if time is not None:
time = time[ii]
if offset is not None:
offset = offset[ii]
if exposure is not None:
exposure = exposure[ii]
del kwargs["missing_idx"]
_check_args(endog, exog, groups, time, offset, exposure)
self.missing = missing
self.dep_data = dep_data
self.constraint = constraint
self.update_dep = update_dep
self._fit_history = defaultdict(list)
# Pass groups, time, offset, and dep_data so they are
# processed for missing data along with endog and exog.
# Calling super creates self.exog, self.endog, etc. as
# ndarrays and the original exog, endog, etc. are
# self.data.endog, etc.
super(GEE, self).__init__(endog, exog, groups=groups,
time=time, offset=offset,
exposure=exposure, weights=weights,
dep_data=dep_data, missing=missing,
**kwargs)
self._init_keys.extend(["update_dep", "constraint", "family",
"cov_struct"])
# Handle the family argument
if family is None:
family = families.Gaussian()
else:
if not issubclass(family.__class__, families.Family):
raise ValueError("GEE: `family` must be a genmod "
"family instance")
self.family = family
# Handle the cov_struct argument
if cov_struct is None:
cov_struct = cov_structs.Independence()
else:
if not issubclass(cov_struct.__class__, cov_structs.CovStruct):
raise ValueError("GEE: `cov_struct` must be a genmod "
"cov_struct instance")
self.cov_struct = cov_struct
# Handle the offset and exposure
self._offset_exposure = None
if offset is not None:
self._offset_exposure = self.offset.copy()
self.offset = offset
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
if self._offset_exposure is not None:
self._offset_exposure += np.log(exposure)
else:
self._offset_exposure = np.log(exposure)
self.exposure = exposure
# Handle the constraint
self.constraint = None
if constraint is not None:
if len(constraint) != 2:
raise ValueError("GEE: `constraint` must be a 2-tuple.")
if constraint[0].shape[1] != self.exog.shape[1]:
raise ValueError(
"GEE: the left hand side of the constraint must have "
"the same number of columns as the exog matrix.")
self.constraint = ParameterConstraint(constraint[0],
constraint[1],
self.exog)
if self._offset_exposure is not None:
self._offset_exposure += self.constraint.offset_increment()
else:
self._offset_exposure = (
self.constraint.offset_increment().copy())
self.exog = self.constraint.reduced_exog()
# Create list of row indices for each group
group_labels, ix = np.unique(self.groups, return_inverse=True)
se = pd.Series(index=np.arange(len(ix)))
gb = se.groupby(ix).groups
dk = [(lb, np.asarray(gb[k])) for k, lb in enumerate(group_labels)]
self.group_indices = dict(dk)
self.group_labels = group_labels
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
self.endog_li = self.cluster_list(self.endog)
self.exog_li = self.cluster_list(self.exog)
if self.weights is not None:
self.weights_li = self.cluster_list(self.weights)
self.weights_li = [x[0] for x in self.weights_li]
self.weights_li = np.asarray(self.weights_li)
self.num_group = len(self.endog_li)
# Time defaults to a 1d grid with equal spacing
if self.time is not None:
self.time = np.asarray(self.time, np.float64)
if self.time.ndim == 1:
self.time = self.time[:, None]
self.time_li = self.cluster_list(self.time)
else:
self.time_li = \
[np.arange(len(y), dtype=np.float64)[:, None]
for y in self.endog_li]
self.time = np.concatenate(self.time_li)
if self._offset_exposure is not None:
self.offset_li = self.cluster_list(self._offset_exposure)
else:
self.offset_li = None
if constraint is not None:
self.constraint.exog_fulltrans_li = \
self.cluster_list(self.constraint.exog_fulltrans)
self.family = family
self.cov_struct.initialize(self)
# Total sample size
group_ns = [len(y) for y in self.endog_li]
self.nobs = sum(group_ns)
# The following are column based, not on rank see #1928
self.df_model = self.exog.shape[1] - 1 # assumes constant
self.df_resid = self.nobs - self.exog.shape[1]
# Skip the covariance updates if all groups have a single
# observation (reduces to fitting a GLM).
maxgroup = max([len(x) for x in self.endog_li])
if maxgroup == 1:
self.update_dep = False
# Override to allow groups and time to be passed as variable
# names.
@classmethod
def from_formula(cls, formula, groups, data, subset=None,
time=None, offset=None, exposure=None,
*args, **kwargs):
"""
Create a GEE model instance from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
groups : array-like or string
Array of grouping labels. If a string, this is the name
of a variable in `data` that contains the grouping labels.
data : array-like
The data for the model.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of the data to used when
fitting the model.
time : array-like or string
The time values, used for dependence structures involving
distances between observations. If a string, this is the
name of a variable in `data` that contains the time
values.
offset : array-like or string
The offset values, added to the linear predictor. If a
string, this is the name of a variable in `data` that
contains the offset values.
exposure : array-like or string
The exposure values, only used if the link function is the
logarithm function, in which case the log of `exposure`
is added to the offset (if any). If a string, this is the
name of a variable in `data` that contains the offset
values.
%(missing_param_doc)s
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with two exceptions. `dep_data`
is processed as described below. The ``eval_env`` keyword is
passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace.
If you wish to use a "clean" environment set ``eval_env=-1``.
Optional arguments
------------------
dep_data : string or array-like
Data used for estimating the dependence structure. See
specific dependence structure classes (e.g. Nested) for
details. If `dep_data` is a string, it is interpreted as
a formula that is applied to `data`. If it is an array, it
must be an array of strings corresponding to column names in
`data`. Otherwise it must be an array-like with the same
number of rows as data.
Returns
-------
model : GEE model instance
Notes
-----
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
""" % {'missing_param_doc': base._missing_param_doc}
groups_name = "Groups"
if isinstance(groups, str):
groups_name = groups
groups = data[groups]
if isinstance(time, str):
time = data[time]
if isinstance(offset, str):
offset = data[offset]
if isinstance(exposure, str):
exposure = data[exposure]
dep_data = kwargs.get("dep_data")
dep_data_names = None
if dep_data is not None:
if isinstance(dep_data, str):
dep_data = patsy.dmatrix(dep_data, data,
return_type='dataframe')
dep_data_names = dep_data.columns.tolist()
else:
dep_data_names = list(dep_data)
dep_data = data[dep_data]
kwargs["dep_data"] = np.asarray(dep_data)
model = super(GEE, cls).from_formula(formula, data=data, subset=subset,
groups=groups, time=time,
offset=offset,
exposure=exposure,
*args, **kwargs)
if dep_data_names is not None:
model._dep_data_names = dep_data_names
model._groups_name = groups_name
return model
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def compare_score_test(self, submodel):
"""
Perform a score test for the given submodel against this model.
Parameters
----------
submodel : GEEResults instance
A fitted GEE model that is a submodel of this model.
Returns
-------
A dictionary with keys "statistic", "p-value", and "df",
containing the score test statistic, its chi^2 p-value,
and the degrees of freedom used to compute the p-value.
Notes
-----
The score test can be performed without calling 'fit' on the
larger model. The provided submodel must be obtained from a
fitted GEE.
This method performs the same score test as can be obtained by
fitting the GEE with a linear constraint and calling `score_test`
on the results.
References
----------
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
# Check consistency between model and submodel (not a comprehensive
# check)
submod = submodel.model
if self.exog.shape[0] != submod.exog.shape[0]:
msg = "Model and submodel have different numbers of cases."
raise ValueError(msg)
if self.exog.shape[1] == submod.exog.shape[1]:
msg = "Model and submodel have the same number of variables"
warnings.warn(msg)
if not isinstance(self.family, type(submod.family)):
msg = "Model and submodel have different GLM families."
warnings.warn(msg)
if not isinstance(self.cov_struct, type(submod.cov_struct)):
warnings.warn("Model and submodel have different GEE covariance "
"structures.")
if not np.equal(self.weights, submod.weights).all():
msg = "Model and submodel should have the same weights."
warnings.warn(msg)
# Get the positions of the submodel variables in the
# parent model
qm, qc = _score_test_submodel(self, submodel.model)
if qm is None:
msg = "The provided model is not a submodel."
raise ValueError(msg)
# Embed the submodel params into a params vector for the
# parent model
params_ex = np.dot(qm, submodel.params)
# Attempt to preserve the state of the parent model
cov_struct_save = self.cov_struct
import copy
cached_means_save = copy.deepcopy(self.cached_means)
# Get the score vector of the submodel params in
# the parent model
self.cov_struct = submodel.cov_struct
self.update_cached_means(params_ex)
_, score = self._update_mean_params()
if score is None:
msg = "Singular matrix encountered in GEE score test"
warnings.warn(msg, ConvergenceWarning)
return None
if not hasattr(self, "ddof_scale"):
self.ddof_scale = self.exog.shape[1]
if not hasattr(self, "scaling_factor"):
self.scaling_factor = 1
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = np.dot(qc.T, score) / scale
amat = np.linalg.inv(ncov1)
bmat_11 = np.dot(qm.T, np.dot(cmat, qm))
bmat_22 = np.dot(qc.T, np.dot(cmat, qc))
bmat_12 = np.dot(qm.T, np.dot(cmat, qc))
amat_11 = np.dot(qm.T, np.dot(amat, qm))
amat_12 = np.dot(qm.T, np.dot(amat, qc))
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
# Attempt to restore state
self.cov_struct = cov_struct_save
self.cached_means = cached_means_save
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
return {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
def estimate_scale(self):
"""
Estimate the dispersion/scale.
The scale parameter for binomial, Poisson, and multinomial
families is fixed at 1, otherwise it is estimated from
the data.
"""
if isinstance(self.family, (families.Binomial, families.Poisson,
_Multinomial)):
return 1.
endog = self.endog_li
cached_means = self.cached_means
nobs = self.nobs
varfunc = self.family.variance
scale = 0.
fsum = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
f = self.weights_li[i] if self.weights is not None else 1.
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / sdev
scale += f * np.sum(resid ** 2)
fsum += f * len(endog[i])
scale /= (fsum * (nobs - self.ddof_scale) / float(nobs))
return scale
def mean_deriv(self, exog, lin_pred):
"""
Derivative of the expected endog with respect to the parameters.
Parameters
----------
exog : array-like
The exogeneous data at which the derivative is computed.
lin_pred : array-like
The values of the linear predictor.
Returns
-------
The value of the derivative of the expected endog with respect
to the parameter vector.
Notes
-----
If there is an offset or exposure, it should be added to
`lin_pred` prior to calling this function.
"""
idl = self.family.link.inverse_deriv(lin_pred)
dmat = exog * idl[:, None]
return dmat
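# For example, with the canonical logit link inverse_deriv(lin_pred) equals
# exp(lin_pred)/(1 + exp(lin_pred))**2 = mu*(1 - mu), so each row of dmat is
# the corresponding row of exog scaled by the Bernoulli variance at that
# observation.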
def mean_deriv_exog(self, exog, params, offset_exposure=None):
"""
Derivative of the expected endog with respect to exog.
Parameters
----------
exog : array-like
Values of the independent variables at which the derivative
is calculated.
params : array-like
Parameter values at which the derivative is calculated.
offset_exposure : array-like, optional
Combined offset and exposure.
Returns
-------
The derivative of the expected endog with respect to exog.
"""
lin_pred = np.dot(exog, params)
if offset_exposure is not None:
lin_pred += offset_exposure
idl = self.family.link.inverse_deriv(lin_pred)
dmat = np.outer(idl, params)
return dmat
def _update_mean_params(self):
"""
Returns
-------
update : array-like
The update vector such that params + update is the next
iterate when solving the score equations.
score : array-like
The current value of the score equations, not
incorporating the scale parameter. If desired,
multiply this vector by the scale parameter to
incorporate the scale.
"""
endog = self.endog_li
exog = self.exog_li
cached_means = self.cached_means
varfunc = self.family.variance
bmat, score = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(expval, i,
sdev, (dmat, resid))
if rslt is None:
return None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
score += f * np.dot(dmat.T, vinv_resid)
update = np.linalg.solve(bmat, score)
self._fit_history["cov_adjust"].append(
self.cov_struct.cov_adjust)
return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = np.dot(exog[i], mean_params)
if offset is not None:
lpr += offset[i]
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
def _covmat(self):
"""
Returns the sampling covariance matrix of the regression
parameters and related quantities.
Returns
-------
cov_robust : array-like
The robust, or sandwich estimate of the covariance, which
is meaningful even if the working covariance structure is
incorrectly specified.
cov_naive : array-like
The model-based estimate of the covariance, which is
meaningful if the covariance structure is correctly
specified.
cmat : array-like
The center matrix of the sandwich expression, used in
obtaining score test results.
"""
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
# Calculate the naive (model-based) and robust (sandwich)
# covariances.
bmat, cmat = 0, 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat, resid))
if rslt is None:
return None, None, None
vinv_d, vinv_resid = tuple(rslt)
f = self.weights_li[i] if self.weights is not None else 1.
bmat += f * np.dot(dmat.T, vinv_d)
dvinv_resid = f * np.dot(dmat.T, vinv_resid)
cmat += np.outer(dvinv_resid, dvinv_resid)
scale = self.estimate_scale()
bmati = np.linalg.inv(bmat)
cov_naive = bmati * scale
cov_robust = np.dot(bmati, np.dot(cmat, bmati))
cov_naive *= self.scaling_factor
cov_robust *= self.scaling_factor
return cov_robust, cov_naive, cmat
# Calculate the bias-corrected sandwich estimate of Mancl and
# DeRouen.
def _bc_covmat(self, cov_naive):
cov_naive = cov_naive / self.scaling_factor
endog = self.endog_li
exog = self.exog_li
varfunc = self.family.variance
cached_means = self.cached_means
scale = self.estimate_scale()
bcm = 0
for i in range(self.num_group):
expval, lpr = cached_means[i]
resid = endog[i] - expval
dmat = self.mean_deriv(exog[i], lpr)
sdev = np.sqrt(varfunc(expval))
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (dmat,))
if rslt is None:
return None
vinv_d = rslt[0]
vinv_d /= scale
hmat = np.dot(vinv_d, cov_naive)
hmat = np.dot(hmat, dmat.T).T
f = self.weights_li[i] if self.weights is not None else 1.
aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (aresid,))
if rslt is None:
return None
srt = rslt[0]
srt = f * np.dot(dmat.T, srt) / scale
bcm += np.outer(srt, srt)
cov_robust_bc = np.dot(cov_naive, np.dot(bcm, cov_naive))
cov_robust_bc *= self.scaling_factor
return cov_robust_bc
def predict(self, params, exog=None, offset=None,
exposure=None, linear=False):
"""
Return predicted values for a marginal regression model fit
using GEE.
Parameters
----------
params : array-like
Parameters / coefficients of a marginal regression model.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
exposure : array-like, optional
Exposure for exog, if exposure is None, model exposure is
used. Only allowed if link function is the logarithm.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
Notes
-----
Using log(V) as the offset is equivalent to using V as the
exposure. If exposure U and offset V are both provided, then
log(U) + V is added to the linear predictor.
"""
# TODO: many paths through this, not well covered in tests
if exposure is not None:
if not isinstance(self.family.link, families.links.Log):
raise ValueError(
"exposure can only be used with the log link function")
# This is the combined offset and exposure
_offset = 0.
# Using model exog
if exog is None:
exog = self.exog
if not isinstance(self.family.link, families.links.Log):
# Don't need to worry about exposure
if offset is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure.copy()
else:
_offset = offset
else:
if offset is None and exposure is None:
if self._offset_exposure is not None:
_offset = self._offset_exposure
elif offset is None and exposure is not None:
_offset = np.log(exposure)
if hasattr(self, "offset"):
_offset = _offset + self.offset
elif offset is not None and exposure is None:
_offset = offset
if hasattr(self, "exposure"):
_offset = offset + np.log(self.exposure)
else:
_offset = offset + np.log(exposure)
# exog is provided: this is simpler than above because we
# never use model exog or exposure if exog is provided.
else:
if offset is not None:
_offset = _offset + offset
if exposure is not None:
_offset += np.log(exposure)
lin_pred = _offset + np.dot(exog, params)
if not linear:
return self.family.link.inverse(lin_pred)
return lin_pred
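# Usage sketch (illustrative): predictions at new covariate values on the
# mean scale and on the linear-predictor scale; endog, exog, groups and
# new_exog are placeholders for the user's data.
#
#   result = GEE(endog, exog, groups, family=families.Poisson()).fit()
#   mu_hat = result.model.predict(result.params, exog=new_exog)
#   eta_hat = result.model.predict(result.params, exog=new_exog, linear=True)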
def _starting_params(self):
model = GLM(self.endog, self.exog, family=self.family,
offset=self._offset_exposure,
freq_weights=self.weights)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust', ddof_scale=None, scaling_factor=1.):
# Docstring attached below
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
self.scaling_factor = scaling_factor
self._fit_history = defaultdict(list)
if self.weights is not None and cov_type == 'naive':
raise ValueError("when using weights, cov_type may not be naive")
if start_params is None:
mean_params = self._starting_params()
else:
start_params = np.asarray(start_params)
mean_params = start_params.copy()
self.update_cached_means(mean_params)
del_params = -1.
num_assoc_updates = 0
for itr in range(maxiter):
update, score = self._update_mean_params()
if update is None:
warnings.warn("Singular matrix encountered in GEE update",
ConvergenceWarning)
break
mean_params += update
self.update_cached_means(mean_params)
# L2 norm of the change in mean structure parameters at
# this iteration.
del_params = np.sqrt(np.sum(score ** 2))
self._fit_history['params'].append(mean_params.copy())
self._fit_history['score'].append(score)
self._fit_history['dep_params'].append(
self.cov_struct.dep_params)
# Don't exit until the association parameters have been
# updated at least once.
if (del_params < ctol and
(num_assoc_updates > 0 or self.update_dep is False)):
break
# Update the dependence structure
if (self.update_dep and (itr % params_niter) == 0
and (itr >= first_dep_update)):
self._update_assoc(mean_params)
num_assoc_updates += 1
if del_params >= ctol:
warnings.warn("Iteration limit reached prior to convergence",
IterationLimitWarning)
if mean_params is None:
warnings.warn("Unable to estimate GEE parameters.",
ConvergenceWarning)
return None
bcov, ncov, _ = self._covmat()
if bcov is None:
warnings.warn("Estimated covariance structure for GEE "
"estimates is singular", ConvergenceWarning)
return None
bc_cov = None
if cov_type == "bias_reduced":
bc_cov = self._bc_covmat(ncov)
if self.constraint is not None:
x = mean_params.copy()
mean_params, bcov = self._handle_constraint(mean_params, bcov)
if mean_params is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
y, ncov = self._handle_constraint(x, ncov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
if bc_cov is not None:
y, bc_cov = self._handle_constraint(x, bc_cov)
if y is None:
warnings.warn("Unable to estimate constrained GEE "
"parameters.", ConvergenceWarning)
return None
scale = self.estimate_scale()
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type=cov_type,
cov_robust=bcov,
cov_naive=ncov,
cov_robust_bc=bc_cov)
# The superclass constructor will multiply the covariance
# matrix argument bcov by scale, which we don't want, so we
# divide bcov by the scale parameter here
results = GEEResults(self, mean_params, bcov / scale, scale,
cov_type=cov_type, use_t=False,
attr_kwds=res_kwds)
# attributes not needed during results__init__
results.fit_history = self._fit_history
self.fit_history = defaultdict(list)
results.score_norm = del_params
results.converged = (del_params < ctol)
results.cov_struct = self.cov_struct
results.params_niter = params_niter
results.first_dep_update = first_dep_update
results.ctol = ctol
results.maxiter = maxiter
# These will be copied over to subclasses when upgrading.
results._props = ["cov_type", "use_t",
"cov_params_default", "cov_robust",
"cov_naive", "cov_robust_bc",
"fit_history",
"score_norm", "converged", "cov_struct",
"params_niter", "first_dep_update", "ctol",
"maxiter"]
return GEEResultsWrapper(results)
fit.__doc__ = _gee_fit_doc
def _update_regularized(self, params, pen_wt, scad_param, eps):
sn, hm = 0, 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid, ex))
sn0 = rslt[0]
sn += np.dot(ex.T, sn0)
hm0 = rslt[1]
hm += np.dot(ex.T, hm0)
# Wang et al. divide sn here by num_group, but that
# seems to be incorrect
ap = np.abs(params)
clipped = np.clip(scad_param * pen_wt - ap, 0, np.inf)
en = pen_wt * clipped * (ap > pen_wt)
en /= (scad_param - 1) * pen_wt
en += pen_wt * (ap <= pen_wt)
en /= eps + ap
hm.flat[::hm.shape[0] + 1] += self.num_group * en
hm *= self.estimate_scale()
sn -= self.num_group * en * params
return np.linalg.solve(hm, sn), hm
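# Note: the `en` term above is the SCAD penalty derivative divided by
# (eps + |beta|): p'(b) = pen_wt for |b| <= pen_wt, and
# p'(b) = (scad_param*pen_wt - |b|)_+ / (scad_param - 1) for |b| > pen_wt,
# which enters `hm` as a ridge-like diagonal addition.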
def _regularized_covmat(self, mean_params):
self.update_cached_means(mean_params)
ma = 0
for i in range(self.num_group):
expval, _ = self.cached_means[i]
resid = self.endog_li[i] - expval
sdev = np.sqrt(self.family.variance(expval))
ex = self.exog_li[i] * sdev[:, None]**2
rslt = self.cov_struct.covariance_matrix_solve(
expval, i, sdev, (resid,))
ma0 = np.dot(ex.T, rslt[0])
ma += np.outer(ma0, ma0)
return ma
def fit_regularized(self, pen_wt, scad_param=3.7, maxiter=100,
ddof_scale=None, update_assoc=5,
ctol=1e-5, ztol=1e-3, eps=1e-6):
"""
Regularized estimation for GEE.
Parameters
----------
pen_wt : float
The penalty weight (a non-negative scalar).
scad_param : float
Non-negative scalar determining the shape of the Scad
penalty.
maxiter : integer
The maximum number of iterations.
ddof_scale : integer
Value to subtract from `nobs` when calculating the
denominator degrees of freedom for t-statistics, defaults
to the number of columns in `exog`.
update_assoc : integer
The dependence parameters are updated every `update_assoc`
iterations of the mean structure parameter updates.
ctol : float
Convergence criterion, default is one order of magnitude
smaller than proposed in section 3.1 of Wang et al.
ztol : float
Coefficients smaller than this value are treated as
being zero, default is based on section 5 of Wang et al.
eps : non-negative scalar
Numerical constant, see section 3.2 of Wang et al.
Returns
-------
GEEResults instance. Note that not all methods of the results
class make sense when the model has been fit with regularization.
Notes
-----
This implementation assumes that the link is canonical.
References
----------
<NAME>, <NAME>, <NAME>. (2012). Penalized generalized estimating
equations for high-dimensional longitudinal data analysis.
Biometrics. 2012 Jun;68(2):353-60.
doi: 10.1111/j.1541-0420.2011.01678.x.
https://www.ncbi.nlm.nih.gov/pubmed/21955051
http://users.stat.umn.edu/~wangx346/research/GEE_selection.pdf
"""
mean_params = np.zeros(self.exog.shape[1])
self.update_cached_means(mean_params)
converged = False
fit_history = defaultdict(list)
# Subtract this number from the total sample size when
# normalizing the scale parameter estimate.
if ddof_scale is None:
self.ddof_scale = self.exog.shape[1]
else:
if not ddof_scale >= 0:
raise ValueError(
"ddof_scale must be a non-negative number or None")
self.ddof_scale = ddof_scale
for itr in range(maxiter):
update, hm = self._update_regularized(
mean_params, pen_wt, scad_param, eps)
if update is None:
msg = "Singular matrix encountered in regularized GEE update",
warnings.warn(msg, ConvergenceWarning)
break
if np.sqrt(np.sum(update**2)) < ctol:
converged = True
break
mean_params += update
fit_history['params'].append(mean_params.copy())
self.update_cached_means(mean_params)
if itr != 0 and (itr % update_assoc == 0):
self._update_assoc(mean_params)
if not converged:
msg = "GEE.fit_regularized did not converge"
warnings.warn(msg)
mean_params[np.abs(mean_params) < ztol] = 0
self._update_assoc(mean_params)
ma = self._regularized_covmat(mean_params)
cov = np.linalg.solve(hm, ma)
cov = np.linalg.solve(hm, cov.T)
# kwargs to add to results instance, need to be available in __init__
res_kwds = dict(cov_type="robust", cov_robust=cov)
scale = self.estimate_scale()
rslt = GEEResults(self, mean_params, cov, scale,
regularized=True, attr_kwds=res_kwds)
rslt.fit_history = fit_history
return GEEResultsWrapper(rslt)
def _handle_constraint(self, mean_params, bcov):
"""
Expand the parameter estimate `mean_params` and covariance matrix
`bcov` to the coordinate system of the unconstrained model.
Parameters
----------
mean_params : array-like
A parameter vector estimate for the reduced model.
bcov : array-like
The covariance matrix of mean_params.
Returns
-------
mean_params : array-like
The input parameter vector mean_params, expanded to the
coordinate system of the full model
bcov : array-like
The input covariance matrix bcov, expanded to the
coordinate system of the full model
"""
# The number of variables in the full model
red_p = len(mean_params)
full_p = self.constraint.lhs.shape[1]
mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]
# Get the score vector under the full model.
save_exog_li = self.exog_li
self.exog_li = self.constraint.exog_fulltrans_li
import copy
save_cached_means = copy.deepcopy(self.cached_means)
self.update_cached_means(mean_params0)
_, score = self._update_mean_params()
if score is None:
warnings.warn("Singular matrix encountered in GEE score test",
ConvergenceWarning)
return None, None
_, ncov1, cmat = self._covmat()
scale = self.estimate_scale()
cmat = cmat / scale ** 2
score2 = score[red_p:] / scale
amat = np.linalg.inv(ncov1)
bmat_11 = cmat[0:red_p, 0:red_p]
bmat_22 = cmat[red_p:, red_p:]
bmat_12 = cmat[0:red_p, red_p:]
amat_11 = amat[0:red_p, 0:red_p]
amat_12 = amat[0:red_p, red_p:]
score_cov = bmat_22 - np.dot(amat_12.T,
np.linalg.solve(amat_11, bmat_12))
score_cov -= np.dot(bmat_12.T,
np.linalg.solve(amat_11, amat_12))
score_cov += np.dot(amat_12.T,
np.dot(np.linalg.solve(amat_11, bmat_11),
np.linalg.solve(amat_11, amat_12)))
from scipy.stats.distributions import chi2
score_statistic = np.dot(score2,
np.linalg.solve(score_cov, score2))
score_df = len(score2)
score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
self.score_test_results = {"statistic": score_statistic,
"df": score_df,
"p-value": score_pvalue}
mean_params = self.constraint.unpack_param(mean_params)
bcov = self.constraint.unpack_cov(bcov)
self.exog_li = save_exog_li
self.cached_means = save_cached_means
self.exog = self.constraint.restore_exog()
return mean_params, bcov
def _update_assoc(self, params):
"""
Update the association parameters
"""
self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects, returns dF(XB) / dX where F(.)
is the fitted mean.
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
# This form should be appropriate for group 1 probit, logit,
# logistic, cloglog, heckprob, xtprobit.
offset_exposure = None
if exog is None:
exog = self.exog
offset_exposure = self._offset_exposure
margeff = self.mean_deriv_exog(exog, params, offset_exposure)
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def qic(self, params, scale, cov_params):
"""
Returns quasi-information criteria and quasi-likelihood values.
Parameters
----------
params : array-like
The GEE estimates of the regression parameters.
scale : scalar
Estimated scale parameter
cov_params : array-like
An estimate of the covariance matrix for the
model parameters. Conventionally this is the robust
covariance matrix.
Returns
-------
ql : scalar
The quasi-likelihood value
qic : scalar
A QIC that can be used to compare the mean and covariance
structures of the model.
qicu : scalar
A simplified QIC that can be used to compare mean structures
but not covariance structures
Notes
-----
The quasi-likelihood used here is obtained by numerically evaluating
Wedderburn's integral representation of the quasi-likelihood function.
This approach is valid for all families and links. Many other
packages use analytical expressions for quasi-likelihoods that are
valid in special cases where the link function is canonical. These
analytical expressions may omit additive constants that only depend
on the data. Therefore, the numerical values of our QL and QIC values
will differ from the values reported by other packages. However only
the differences between two QIC values calculated for different models
using the same data are meaningful. Our QIC should produce the same
QIC differences as other software.
When using the QIC for models with unknown scale parameter, use a
common estimate of the scale parameter for all models being compared.
References
----------
.. [*] Pan W. (2001). Akaike's information criterion in generalized
estimating equations. Biometrics (57) 1.
"""
varfunc = self.family.variance
means = []
omega = 0.0
# omega^-1 is the model-based covariance assuming independence
for i in range(self.num_group):
expval, lpr = self.cached_means[i]
means.append(expval)
dmat = self.mean_deriv(self.exog_li[i], lpr)
omega += np.dot(dmat.T, dmat) / scale
means = np.concatenate(means)
# The quasi-likelihood, use change of variables so the integration is
# from -1 to 1.
du = means - self.endog
nstep = 10000
qv = np.empty(nstep)
xv = np.linspace(-0.99999, 1, nstep)
for i, g in enumerate(xv):
u = self.endog + (g + 1) * du / 2.0
vu = varfunc(u)
qv[i] = -np.sum(du**2 * (g + 1) / vu)
qv /= (4 * scale)
from scipy.integrate import trapz
ql = trapz(qv, dx=xv[1] - xv[0])
qicu = -2 * ql + 2 * self.exog.shape[1]
qic = -2 * ql + 2 * np.trace(np.dot(omega, cov_params))
return ql, qic, qicu
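# Hedged usage sketch: comparing mean structures by QIC.  The data frame
# `df`, its column names, and the family/covariance choices below are
# illustrative assumptions.  As implemented above,
# QIC = -2*QL + 2*trace(omega @ cov_params) and QICu = -2*QL + 2*p.
#
#     fam = families.Poisson()
#     cov = cov_structs.Exchangeable()
#     r1 = GEE.from_formula("y ~ x1 + x2", "grp", df, family=fam,
#                           cov_struct=cov).fit()
#     r2 = GEE.from_formula("y ~ x1", "grp", df, family=fam,
#                           cov_struct=cov).fit()
#     scale = r1.scale  # use a common scale estimate for both models
#     qic1, _ = r1.qic(scale=scale)
#     qic2, _ = r2.qic(scale=scale)
#     # Only the difference qic1 - qic2 is meaningful; smaller is preferred.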
class GEEResults(base.LikelihoodModelResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model "
"using GEE.\n" + _gee_results_doc)
def __init__(self, model, params, cov_params, scale,
cov_type='robust', use_t=False, regularized=False,
**kwds):
super(GEEResults, self).__init__(
model, params, normalized_cov_params=cov_params,
scale=scale)
# not added by super
self.df_resid = model.df_resid
self.df_model = model.df_model
self.family = model.family
attr_kwds = kwds.pop('attr_kwds', {})
self.__dict__.update(attr_kwds)
# we don't do this if the cov_type has already been set
# subclasses can set it through attr_kwds
if not (hasattr(self, 'cov_type') and
hasattr(self, 'cov_params_default')):
self.cov_type = cov_type # keep alias
covariance_type = self.cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `cov_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
cov = self.cov_robust
elif covariance_type == "naive":
cov = self.cov_naive
elif covariance_type == "bias_reduced":
cov = self.cov_robust_bc
self.cov_params_default = cov
else:
if self.cov_type != cov_type:
raise ValueError('cov_type in argument is different from '
'already attached cov_type')
def standard_errors(self, cov_type="robust"):
"""
This is a convenience function that returns the standard
errors for any covariance type. The value of `bse` is the
standard errors for whichever covariance type is specified as
an argument to `fit` (defaults to "robust").
Parameters
----------
cov_type : string
One of "robust", "naive", or "bias_reduced". Determines
the covariance used to compute standard errors. Defaults
to "robust".
"""
# Check covariance_type
covariance_type = cov_type.lower()
allowed_covariances = ["robust", "naive", "bias_reduced"]
if covariance_type not in allowed_covariances:
msg = ("GEE: `covariance_type` must be one of " +
", ".join(allowed_covariances))
raise ValueError(msg)
if covariance_type == "robust":
return np.sqrt(np.diag(self.cov_robust))
elif covariance_type == "naive":
return np.sqrt(np.diag(self.cov_naive))
elif covariance_type == "bias_reduced":
if self.cov_robust_bc is None:
raise ValueError(
"GEE: `bias_reduced` covariance not available")
return np.sqrt(np.diag(self.cov_robust_bc))
# Need to override to allow for different covariance types.
@cache_readonly
def bse(self):
return self.standard_errors(self.cov_type)
@cache_readonly
def resid(self):
"""
Returns the residuals, the endogenous data minus the fitted
values from the model.
"""
return self.model.endog - self.fittedvalues
def score_test(self):
"""
Return the results of a score test for a linear constraint.
Returns
-------
A dictionary containing the p-value, the test statistic,
and the degrees of freedom for the score test.
Notes
-----
See also GEE.compare_score_test for an alternative way to perform
a score test. GEEResults.score_test is more general, in that it
supports testing arbitrary linear equality constraints. However
GEE.compare_score_test might be easier to use when comparing
two explicit models.
References
----------
Xu Guo and Wei Pan (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
"""
if not hasattr(self.model, "score_test_results"):
msg = "score_test on results instance only available when "
msg += " model was fit with constraints"
raise ValueError(msg)
return self.model.score_test_results
@cache_readonly
def resid_split(self):
"""
Returns the residuals, the endogenous data minus the fitted
values from the model. The residuals are returned as a list
of arrays containing the residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.resid[ii])
return sresid
@cache_readonly
def resid_centered(self):
"""
Returns the residuals centered within each group.
"""
cresid = self.resid.copy()
for v in self.model.group_labels:
ii = self.model.group_indices[v]
cresid[ii] -= cresid[ii].mean()
return cresid
@cache_readonly
def resid_centered_split(self):
"""
Returns the residuals centered within each group. The
residuals are returned as a list of arrays containing the
centered residuals for each cluster.
"""
sresid = []
for v in self.model.group_labels:
ii = self.model.group_indices[v]
sresid.append(self.centered_resid[ii])
return sresid
def qic(self, scale=None):
"""
Returns the QIC and QICu information criteria.
For families with a scale parameter (e.g. Gaussian), provide
as the scale argument the estimated scale from the largest
model under consideration.
If the scale parameter is not provided, the estimated scale
parameter is used. Doing this does not allow comparisons of
QIC values between models.
"""
# It is easy to forget to set the scale parameter. Sometimes
# this is intentional, so we warn.
if scale is None:
warnings.warn("QIC values obtained using scale=None are not "
"appropriate for comparing models")
if scale is None:
scale = self.scale
_, qic, qicu = self.model.qic(self.params, scale,
self.cov_params())
return qic, qicu
# FIXME: alias to be removed, temporary backwards compatibility
split_resid = resid_split
centered_resid = resid_centered
split_centered_resid = resid_centered_split
@cache_readonly
def resid_response(self):
return self.model.endog - self.fittedvalues
@cache_readonly
def resid_pearson(self):
val = self.model.endog - self.fittedvalues
val = val / np.sqrt(self.family.variance(self.fittedvalues))
return val
@cache_readonly
def resid_working(self):
val = self.resid_response
val = val * self.family.link.deriv(self.fittedvalues)
return val
@cache_readonly
def resid_anscombe(self):
return self.family.resid_anscombe(self.model.endog, self.fittedvalues)
@cache_readonly
def resid_deviance(self):
return self.family.resid_dev(self.model.endog, self.fittedvalues)
@cache_readonly
def fittedvalues(self):
"""
Returns the fitted values from the model.
"""
return self.model.family.link.inverse(np.dot(self.model.exog,
self.params))
def plot_added_variable(self, focus_exog, resid_type=None,
use_glm_weights=True, fit_kwargs=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_added_variable
fig = plot_added_variable(self, focus_exog,
resid_type=resid_type,
use_glm_weights=use_glm_weights,
fit_kwargs=fit_kwargs, ax=ax)
return fig
plot_added_variable.__doc__ = _plot_added_variable_doc % {
'extra_params_doc': ''}
def plot_partial_residuals(self, focus_exog, ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_partial_residuals
return plot_partial_residuals(self, focus_exog, ax=ax)
plot_partial_residuals.__doc__ = _plot_partial_residuals_doc % {
'extra_params_doc': ''}
def plot_ceres_residuals(self, focus_exog, frac=0.66, cond_means=None,
ax=None):
# Docstring attached below
from statsmodels.graphics.regressionplots import plot_ceres_residuals
return plot_ceres_residuals(self, focus_exog, frac,
cond_means=cond_means, ax=ax)
plot_ceres_residuals.__doc__ = _plot_ceres_residuals_doc % {
'extra_params_doc': ''}
def conf_int(self, alpha=.05, cols=None, cov_type=None):
"""
Returns confidence intervals for the fitted parameters.
Parameters
----------
alpha : float, optional
The `alpha` level for the confidence interval. i.e., The
default `alpha` = .05 returns a 95% confidence interval.
cols : array-like, optional
`cols` specifies which confidence intervals to return
cov_type : string
The covariance type used for computing standard errors;
must be one of 'robust', 'naive', and 'bias_reduced'.
See `GEE` for details.
Notes
-----
The confidence interval is based on the Gaussian distribution.
"""
# super doesn't allow to specify cov_type and method is not
# implemented,
# FIXME: remove this method here
if cov_type is None:
bse = self.bse
else:
bse = self.standard_errors(cov_type=cov_type)
params = self.params
dist = stats.norm
q = dist.ppf(1 - alpha / 2)
if cols is None:
lower = self.params - q * bse
upper = self.params + q * bse
else:
cols = np.asarray(cols)
lower = params[cols] - q * bse[cols]
upper = params[cols] + q * bse[cols]
return np.asarray(lzip(lower, upper))
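# Hedged usage sketch: intervals under different covariance estimates.
# `rslt` stands for an already fitted GEEResults instance and is an
# assumption, not an object defined in this module.
#
#     ci_robust = rslt.conf_int()                   # robust (sandwich) SEs
#     ci_naive = rslt.conf_int(cov_type="naive")    # ignores dependence
#     ci_sub = rslt.conf_int(alpha=0.10, cols=[0, 1])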
def summary(self, yname=None, xname=None, title=None, alpha=.05):
"""
Summarize the GEE regression results
Parameters
----------
yname : string, optional
Default is `y`
xname : list of strings, optional
Default is `var_##` for ## ranging over the p regressors
title : string, optional
Title for the top table. If not None, then this replaces
the default title
alpha : float
significance level for the confidence intervals
cov_type : string
The covariance type used to compute the standard errors;
one of 'robust' (the usual robust sandwich-type covariance
estimate), 'naive' (ignores dependence), and 'bias
reduced' (the Mancl/DeRouen estimate).
Returns
-------
smry : Summary instance
this holds the summary tables and text, which can be
printed or converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary : class to hold summary results
"""
top_left = [('Dep. Variable:', None),
('Model:', None),
('Method:', ['Generalized']),
('', ['Estimating Equations']),
('Family:', [self.model.family.__class__.__name__]),
('Dependence structure:',
[self.model.cov_struct.__class__.__name__]),
('Date:', None),
('Covariance type: ', [self.cov_type, ])
]
NY = [len(y) for y in self.model.endog_li]
top_right = [('No. Observations:', [sum(NY)]),
('No. clusters:', [len(self.model.endog_li)]),
('Min. cluster size:', [min(NY)]),
('Max. cluster size:', [max(NY)]),
('Mean cluster size:', ["%.1f" % np.mean(NY)]),
('Num. iterations:', ['%d' %
len(self.fit_history['params'])]),
('Scale:', ["%.3f" % self.scale]),
('Time:', None),
]
# The skew of the residuals
skew1 = stats.skew(self.resid)
kurt1 = stats.kurtosis(self.resid)
skew2 = stats.skew(self.centered_resid)
kurt2 = stats.kurtosis(self.centered_resid)
diagn_left = [('Skew:', ["%12.4f" % skew1]),
('Centered skew:', ["%12.4f" % skew2])]
diagn_right = [('Kurtosis:', ["%12.4f" % kurt1]),
('Centered kurtosis:', ["%12.4f" % kurt2])
]
if title is None:
title = self.model.__class__.__name__ + ' ' +\
"Regression Results"
# Override the exog variable names if xname is provided as an
# argument.
if xname is None:
xname = self.model.exog_names
if yname is None:
yname = self.model.endog_names
# Create summary table instance
from statsmodels.iolib.summary import Summary
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
yname=yname, xname=xname,
title=title)
smry.add_table_params(self, yname=yname, xname=xname,
alpha=alpha, use_t=False)
smry.add_table_2cols(self, gleft=diagn_left,
gright=diagn_right, yname=yname,
xname=xname, title="")
return smry
def get_margeff(self, at='overall', method='dydx', atexog=None,
dummy=False, count=False):
"""Get marginal effects of the fitted model.
Parameters
----------
at : str, optional
Options are:
- 'overall', The average of the marginal effects at each
observation.
- 'mean', The marginal effects at the mean of each regressor.
- 'median', The marginal effects at the median of each regressor.
- 'zero', The marginal effects at zero for each regressor.
- 'all', The marginal effects at each observation. If `at` is 'all'
only margeff will be available.
Note that if `exog` is specified, then marginal effects for all
variables not specified by `exog` are calculated using the `at`
option.
method : str, optional
Options are:
- 'dydx' - dy/dx - No transformation is made and marginal effects
are returned. This is the default.
- 'eyex' - estimate elasticities of variables in `exog` --
d(lny)/d(lnx)
- 'dyex' - estimate semielasticity -- dy/d(lnx)
- 'eydx' - estimate semielasticity -- d(lny)/dx
Note that transformations are done after each observation is
calculated. Semi-elasticities for binary variables are computed
using the midpoint method. 'dyex' and 'eyex' do not make sense
for discrete variables.
atexog : array-like, optional
Optionally, you can provide the exogenous variables over which to
get the marginal effects. This should be a dictionary with the
zero-indexed column number as the key and the value at which to
hold that variable as the dictionary value.
Default is None for all independent variables less the constant.
dummy : bool, optional
If False, treats binary variables (if present) as continuous. This
is the default. Else if True, treats binary variables as
changing from 0 to 1. Note that any variable that is either 0 or 1
is treated as binary. Each binary variable is treated separately
for now.
count : bool, optional
If False, treats count variables (if present) as continuous. This
is the default. Else if True, the marginal effect is the
change in probabilities when each observation is increased by one.
Returns
-------
effects : ndarray
the marginal effect corresponding to the input options
Notes
-----
When used after a Poisson model, returns the expected number of events
per period, assuming that the model is loglinear.
"""
if self.model.constraint is not None:
warnings.warn("marginal effects ignore constraints",
ValueWarning)
return GEEMargins(self, (at, method, atexog, dummy, count))
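# Hedged usage sketch (assumes `rslt` is a fitted GEEResults for a binary
# outcome; whether the returned GEEMargins exposes a summary() is taken
# from the wider statsmodels API and is an assumption here):
#
#     marg = rslt.get_margeff(at="overall", method="dydx")
#     print(marg.summary())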
def plot_isotropic_dependence(self, ax=None, xpoints=10,
min_n=50):
"""
Create a plot of the pairwise products of within-group
residuals against the corresponding time differences. This
plot can be used to assess the possible form of an isotropic
covariance structure.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
xpoints : scalar or array-like
If scalar, the number of equally spaced points on
the time difference axis used to define bins for
calculating local means. If an array, the specific points
that define the bins.
min_n : integer
The minimum sample size in a bin for the mean residual
product to be included on the plot.
"""
from statsmodels.graphics import utils as gutils
resid = self.model.cluster_list(self.resid)
time = self.model.cluster_list(self.model.time)
# All within-group pairwise time distances (xdt) and the
# corresponding products of scaled residuals (xre).
xre, xdt = [], []
for re, ti in zip(resid, time):
ix = np.tril_indices(re.shape[0], 0)
re = re[ix[0]] * re[ix[1]] / self.scale ** 2
xre.append(re)
dists = np.sqrt(((ti[ix[0], :] - ti[ix[1], :]) ** 2).sum(1))
xdt.append(dists)
xre = np.concatenate(xre)
xdt = np.concatenate(xdt)
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# Convert to a correlation
ii = np.flatnonzero(xdt == 0)
v0 = np.mean(xre[ii])
xre /= v0
# Use the simple average to smooth, since fancier smoothers
# that trim and downweight outliers give biased results (we
# need the actual mean of a skewed distribution).
if np.isscalar(xpoints):
xpoints = np.linspace(0, max(xdt), xpoints)
dg = np.digitize(xdt, xpoints)
dgu = np.unique(dg)
hist = np.asarray([np.sum(dg == k) for k in dgu])
ii = np.flatnonzero(hist >= min_n)
dgu = dgu[ii]
dgy = np.asarray([np.mean(xre[dg == k]) for k in dgu])
dgx = np.asarray([np.mean(xdt[dg == k]) for k in dgu])
ax.plot(dgx, dgy, '-', color='orange', lw=5)
ax.set_xlabel("Time difference")
ax.set_ylabel("Product of scaled residuals")
return fig
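# Hedged usage sketch (assumes a fitted GEEResults `rslt` whose model was
# given a `time` argument, and matplotlib being available):
#
#     fig = rslt.plot_isotropic_dependence(xpoints=15, min_n=30)
#     fig.savefig("isotropic_dependence.png")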
def sensitivity_params(self, dep_params_first,
dep_params_last, num_steps):
"""
Refits the GEE model using a sequence of values for the
dependence parameters.
Parameters
----------
dep_params_first : array-like
The first dep_params in the sequence
dep_params_last : array-like
The last dep_params in the sequence
num_steps : int
The number of dep_params in the sequence
Returns
-------
results : array-like
The GEEResults objects resulting from the fits.
"""
model = self.model
import copy
cov_struct = copy.deepcopy(self.model.cov_struct)
# We are fixing the dependence structure in each run.
update_dep = model.update_dep
model.update_dep = False
dep_params = []
results = []
for x in np.linspace(0, 1, num_steps):
dp = x * dep_params_last + (1 - x) * dep_params_first
dep_params.append(dp)
model.cov_struct = copy.deepcopy(cov_struct)
model.cov_struct.dep_params = dp
rslt = model.fit(start_params=self.params,
ctol=self.ctol,
params_niter=self.params_niter,
first_dep_update=self.first_dep_update,
cov_type=self.cov_type)
results.append(rslt)
model.update_dep = update_dep
return results
# FIXME: alias to be removed, temporary backwards compatibility
params_sensitivity = sensitivity_params
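# Hedged usage sketch: profiling the fit over a grid of dependence
# parameters (assumes `rslt` comes from a model fit with
# cov_structs.Exchangeable(), whose dep_params is a single correlation):
#
#     fits = rslt.sensitivity_params(dep_params_first=0.0,
#                                    dep_params_last=0.5,
#                                    num_steps=6)
#     for f in fits:
#         print(f.model.cov_struct.dep_params, f.params[:2])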
class GEEResultsWrapper(lm.RegressionResultsWrapper):
_attrs = {
'centered_resid': 'rows',
}
_wrap_attrs = wrap.union_dicts(lm.RegressionResultsWrapper._wrap_attrs,
_attrs)
wrap.populate_wrapper(GEEResultsWrapper, GEEResults) # noqa:E305
class OrdinalGEE(GEE):
__doc__ = (
" Estimation of ordinal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_ordinal_family_doc,
'example': _gee_ordinal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
if family is None:
family = families.Binomial()
else:
if not isinstance(family, families.Binomial):
raise ValueError("ordinal GEE must use a Binomial family")
if cov_struct is None:
cov_struct = cov_structs.OrdinalIndependence()
endog, exog, groups, time, offset = self.setup_ordinal(
endog, exog, groups, time, offset)
super(OrdinalGEE, self).__init__(endog, exog, groups, time,
family, cov_struct, missing,
offset, dep_data, constraint)
def setup_ordinal(self, endog, exog, groups, time, offset):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
nrows = ncut * len(endog)
exog_out = np.zeros((nrows, exog.shape[1]),
dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
intercepts = np.zeros((nrows, ncut), dtype=np.float64)
groups_out = np.zeros(nrows, dtype=groups.dtype)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over thresholds for the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
exog_out[jrow, :] = exog_row
endog_out[jrow] = (int(endog_value > thresh))
intercepts[jrow, thresh_ix] = 1
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
exog_out = np.concatenate((intercepts, exog_out), axis=1)
# exog column names, including intercepts
xnames = ["I(y>%.1f)" % v for v in endog_cuts]
if isinstance(self.exog_orig, pd.DataFrame):
xnames.extend(self.exog_orig.columns)
else:
xnames.extend(["x%d" % k for k in range(1, exog.shape[1] + 1)])
exog_out = pd.DataFrame(exog_out, columns=xnames)
# Preserve the endog name if there is one
if isinstance(self.endog_orig, pd.Series):
endog_out = pd.Series(endog_out, name=self.endog_orig.name)
return endog_out, exog_out, groups_out, time_out, offset_out
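# Worked illustration of the expansion above (values are made up): with
# ordinal outcomes {1, 2, 3}, endog_cuts is (1, 2), so every observation y
# yields two binary rows I(y > 1) and I(y > 2), each carrying its own
# intercept column while the original covariates are repeated unchanged:
#
#     y = 2  ->  (endog_out, intercepts) rows: (1, [1, 0]) and (0, [0, 1])
#     y = 3  ->                                (1, [1, 0]) and (1, [0, 1])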
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def fit(self, maxiter=60, ctol=1e-6, start_params=None,
params_niter=1, first_dep_update=0,
cov_type='robust'):
rslt = super(OrdinalGEE, self).fit(maxiter, ctol, start_params,
params_niter, first_dep_update,
cov_type=cov_type)
rslt = rslt._results # use unwrapped instance
res_kwds = dict(((k, getattr(rslt, k)) for k in rslt._props))
# Convert the GEEResults to an OrdinalGEEResults
ord_rslt = OrdinalGEEResults(self, rslt.params,
rslt.cov_params() / rslt.scale,
rslt.scale,
cov_type=cov_type,
attr_kwds=res_kwds)
# for k in rslt._props:
# setattr(ord_rslt, k, getattr(rslt, k))
return OrdinalGEEResultsWrapper(ord_rslt)
fit.__doc__ = _gee_fit_doc
class OrdinalGEEResults(GEEResults):
__doc__ = (
"This class summarizes the fit of a marginal regression model"
"for an ordinal response using GEE.\n"
+ _gee_results_doc)
def plot_distribution(self, ax=None, exog_values=None):
"""
Plot the fitted probabilities of endog in an ordinal model,
for specified values of the predictors.
Parameters
----------
ax : Matplotlib axes instance
An axes on which to draw the graph. If None, new
figure and axes objects are created
exog_values : array-like
A list of dictionaries, with each dictionary mapping
variable names to values at which the variable is held
fixed. The values P(endog=y | exog) are plotted for all
possible values of y, at the given exog value. Variables
not included in a dictionary are held fixed at the mean
value.
Examples
--------
We have a model with covariates 'age' and 'sex', and wish to
plot the probabilities P(endog=y | exog) for males (sex=0) and
for females (sex=1), as separate paths on the plot. Since
'age' is not included below in the map, it is held fixed at
its mean value.
>>> ev = [{"sex": 1}, {"sex": 0}]
>>> rslt.plot_distribution(exog_values=ev)
"""
from statsmodels.graphics import utils as gutils
if ax is None:
fig, ax = gutils.create_mpl_ax(ax)
else:
fig = ax.get_figure()
# If no covariate patterns are specified, create one with all
# variables set to their mean values.
if exog_values is None:
exog_values = [{}, ]
exog_means = self.model.exog.mean(0)
ix_icept = [i for i, x in enumerate(self.model.exog_names) if
x.startswith("I(")]
for ev in exog_values:
for k in ev.keys():
if k not in self.model.exog_names:
raise ValueError("%s is not a variable in the model"
% k)
# Get the fitted probability for each level, at the given
# covariate values.
pr = []
for j in ix_icept:
xp = np.zeros_like(self.params)
xp[j] = 1.
for i, vn in enumerate(self.model.exog_names):
if i in ix_icept:
continue
# User-specified value
if vn in ev:
xp[i] = ev[vn]
# Mean value
else:
xp[i] = exog_means[i]
p = 1 / (1 + np.exp(-np.dot(xp, self.params)))
pr.append(p)
pr.insert(0, 1)
pr.append(0)
pr = np.asarray(pr)
prd = -np.diff(pr)
ax.plot(self.model.endog_values, prd, 'o-')
ax.set_xlabel("Response value")
ax.set_ylabel("Probability")
ax.set_ylim(0, 1)
return fig
def _score_test_submodel(par, sub):
"""
Return transformation matrices for design matrices.
Parameters
----------
par : instance
The parent model
sub : instance
The sub-model
Returns
-------
qm : array-like
Matrix mapping the design matrix of the parent to the design matrix
for the sub-model.
qc : array-like
Matrix mapping the design matrix of the parent to the orthogonal
complement of the columnspace of the submodel in the columnspace
of the parent.
Notes
-----
Returns None, None if the provided submodel is not actually a submodel.
"""
x1 = par.exog
x2 = sub.exog
u, s, vt = np.linalg.svd(x1, 0)
# Get the orthogonal complement of col(x2) in col(x1).
a, _, _ = np.linalg.svd(x2, 0)
a = u - np.dot(a, np.dot(a.T, u))
x2c, sb, _ = np.linalg.svd(a, 0)
x2c = x2c[:, sb > 1e-12]
# x1 * qm = x2
qm = np.dot(vt.T, np.dot(u.T, x2) / s[:, None])
e = np.max(np.abs(x2 - np.dot(x1, qm)))
if e > 1e-8:
return None, None
# x1 * qc = x2c
qc = np.dot(vt.T, np.dot(u.T, x2c) / s[:, None])
return qm, qc
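# Hedged numerical check of the mapping above (`par` and `sub` are assumed
# model instances with col(sub.exog) contained in col(par.exog)):
#
#     qm, qc = _score_test_submodel(par, sub)
#     assert np.allclose(np.dot(par.exog, qm), sub.exog)  # x1 @ qm == x2
#     # columns of x1 @ qc span the orthogonal complement of col(x2):
#     assert np.allclose(np.dot(sub.exog.T, np.dot(par.exog, qc)), 0,
#                        atol=1e-8)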
class OrdinalGEEResultsWrapper(GEEResultsWrapper):
pass
wrap.populate_wrapper(OrdinalGEEResultsWrapper, OrdinalGEEResults) # noqa:E305
class NominalGEE(GEE):
__doc__ = (
" Estimation of nominal response marginal regression models\n"
" using Generalized Estimating Equations (GEE).\n" +
_gee_init_doc % {'extra_params': base._missing_param_doc,
'family_doc': _gee_nominal_family_doc,
'example': _gee_nominal_example})
def __init__(self, endog, exog, groups, time=None, family=None,
cov_struct=None, missing='none', offset=None,
dep_data=None, constraint=None, **kwargs):
endog, exog, groups, time, offset = self.setup_nominal(
endog, exog, groups, time, offset)
if family is None:
family = _Multinomial(self.ncut + 1)
if cov_struct is None:
cov_struct = cov_structs.NominalIndependence()
super(NominalGEE, self).__init__(
endog, exog, groups, time, family, cov_struct, missing,
offset, dep_data, constraint)
def _starting_params(self):
model = GEE(self.endog, self.exog, self.groups,
time=self.time, family=families.Binomial(),
offset=self.offset, exposure=self.exposure)
result = model.fit()
return result.params
def setup_nominal(self, endog, exog, groups, time, offset):
"""
Restructure nominal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = endog.copy()
self.exog_orig = exog.copy()
self.groups_orig = groups.copy()
if offset is not None:
self.offset_orig = offset.copy()
else:
self.offset_orig = None
offset = np.zeros(len(endog))
if time is not None:
self.time_orig = time.copy()
else:
self.time_orig = None
time = np.zeros((len(endog), 1))
exog = np.asarray(exog)
endog = np.asarray(endog)
groups = np.asarray(groups)
time = np.asarray(time)
offset = np.asarray(offset)
# The unique outcomes, except the greatest one.
self.endog_values = np.unique(endog)
endog_cuts = self.endog_values[0:-1]
ncut = len(endog_cuts)
self.ncut = ncut
nrows = len(endog_cuts) * exog.shape[0]
ncols = len(endog_cuts) * exog.shape[1]
exog_out = np.zeros((nrows, ncols), dtype=np.float64)
endog_out = np.zeros(nrows, dtype=np.float64)
groups_out = np.zeros(nrows, dtype=np.float64)
time_out = np.zeros((nrows, time.shape[1]),
dtype=np.float64)
offset_out = np.zeros(nrows, dtype=np.float64)
jrow = 0
zipper = zip(exog, endog, groups, time, offset)
for (exog_row, endog_value, group_value, time_value,
offset_value) in zipper:
# Loop over the outcome levels (all but the last) to form the indicators
for thresh_ix, thresh in enumerate(endog_cuts):
u = np.zeros(len(endog_cuts), dtype=np.float64)
u[thresh_ix] = 1
exog_out[jrow, :] = np.kron(u, exog_row)
endog_out[jrow] = (int(endog_value == thresh))
groups_out[jrow] = group_value
time_out[jrow] = time_value
offset_out[jrow] = offset_value
jrow += 1
# exog names
if isinstance(self.exog_orig, pd.DataFrame):
xnames_in = self.exog_orig.columns
else:
xnames_in = ["x%d" % k for k in range(1, exog.shape[1] + 1)]
xnames = []
for tr in endog_cuts:
xnames.extend(["%s[%.1f]" % (v, tr) for v in xnames_in])
exog_out = pd.DataFrame(exog_out, columns=xnames)
exog_out = | pd.DataFrame(exog_out, columns=xnames) | pandas.DataFrame |
# County Housing Vacancy Raw Numbers
# Source: Census (census.data.gov) advanced search (Topics: 'Housing-Vacancy-Vacancy Rates' ('Vacancy Status' tabl); Geography: All US Counties; Years: 2010-2018 ACS 5-Yr. Estimates)
import pandas as pd
import numpy as np
import os
master_df = | pd.DataFrame() | pandas.DataFrame |
import pickle
from tqdm import tqdm
import numpy as np
import pandas as pd
from nltk.util import ngrams
from nltk import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
from nltk.lm.preprocessing import padded_everygram_pipeline, pad_both_ends
from nltk.lm import NgramCounter, Vocabulary, MLE, Lidstone, WittenBellInterpolated, KneserNeyInterpolated
from nltk.lm.models import InterpolatedLanguageModel
from nltk.lm.smoothing import WittenBell
from nltk.lm.api import Smoothing
from scipy.special import softmax
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from symspellpy.symspellpy import SymSpell, Verbosity
from scipy.stats import poisson
import itertools
from hyperopt import fmin, tpe, hp
from jiwer import wer
# modifications on NLTK
class MLidstone(Lidstone):
"""Provides (modified from NLTK) Lidstone-smoothed scores."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.vocab_len = len(self.vocab)
def unmasked_score(self, word, context=None):
"""Modified to use vocab_len to store length of vocabulary
Results in a much faster implementation.
"""
counts = self.context_counts(context)
word_count = counts[word]
norm_count = counts.N()
return (word_count + self.gamma) / (norm_count + self.vocab_len * self.gamma)
def _count_non_zero_vals(dictionary):
return sum(1.0 for c in dictionary.values() if c > 0)
class MKneserNey(Smoothing):
"""Kneser-Ney Smoothing."""
def __init__(self, vocabulary, counter, discount=0.1, **kwargs):
super().__init__(vocabulary, counter, **kwargs)
self.discount = discount
self.vocab_len = len(self.vocab)
def unigram_score(self, word):
return 1.0 / self.vocab_len
def alpha_gamma(self, word, context):
prefix_counts = self.counts[context]
prefix_total_ngrams = prefix_counts.N()
if prefix_total_ngrams:
alpha = max(prefix_counts[word] - self.discount, 0.0) / prefix_total_ngrams
gamma = (
self.discount * _count_non_zero_vals(prefix_counts) / prefix_total_ngrams
)
else:
alpha, gamma = 0, 1
return alpha, gamma
class MKneserNeyInterpolated(InterpolatedLanguageModel):
"""(modified from NLTK) Interpolated version of Kneser-Ney smoothing."""
def __init__(self, order, discount=0.1, **kwargs):
super().__init__(MKneserNey, order, params={"discount": discount}, **kwargs)
class MWittenBell(WittenBell):
"""(modified from NLTK) Witten-Bell smoothing."""
def __init__(self, vocabulary, counter, **kwargs):
super().__init__(vocabulary, counter, **kwargs)
self.countsdb = {}
for i in range(10):
self.countsdb[i] = self.counts[i].N()
def gamma(self, context):
n_plus = _count_non_zero_vals(self.counts[context])
return n_plus / (n_plus + self.countsdb[len(context) + 1])
class MWittenBellInterpolated(InterpolatedLanguageModel):
"""(modified from NLTK) Interpolated version of Witten-Bell smoothing."""
def __init__(self, order, **kwargs):
super().__init__(MWittenBell, order, **kwargs)
# Helper function for training a ngram
def count_ngrams_and_vocab(corpus, n=3, unk_cutoff=10):
tokenized_text = [list(map(str.lower, word_tokenize(sent))) for sent in corpus]
training_ngrams, padded_sents = padded_everygram_pipeline(n, tokenized_text)
return NgramCounter(training_ngrams), Vocabulary(padded_sents, unk_cutoff=unk_cutoff)
def train_ngram_lm(corpus, models, n=3, a=0.0015, unk_cutoff=10, discount=0.1):
tokenized_text = [list(map(str.lower, word_tokenize(sent))) for sent in corpus]
training_ngrams, padded_sents = padded_everygram_pipeline(n, tokenized_text)
vocab = Vocabulary(padded_sents, unk_cutoff=unk_cutoff)
lms = []
for model in models:
training_ngrams, padded_sents = padded_everygram_pipeline(n, tokenized_text)
if model == 'Kneser Ney':
lm = MKneserNeyInterpolated(order=n, discount=discount, vocabulary=vocab)
elif model == 'WBI':
lm = MWittenBellInterpolated(order=n, vocabulary=vocab)
elif model == 'Lidstone':
lm = MLidstone(gamma=a, order=n, vocabulary=vocab)
lm.fit(training_ngrams)
lms += [lm]
return lms
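# Note: the definition below takes pre-tokenized text (a list of token lists)
# and, having the same name, replaces the string-corpus version above when
# the module is loaded.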
def train_ngram_lm(tokenized_text, models, n=3, a=0.0015, unk_cutoff=10, discount=0.1):
training_ngrams, padded_sents = padded_everygram_pipeline(n, tokenized_text)
vocab = Vocabulary(padded_sents, unk_cutoff=unk_cutoff)
lms = []
for model in models:
training_ngrams, padded_sents = padded_everygram_pipeline(n, tokenized_text)
if model == '<NAME>ey':
lm = MKneserNeyInterpolated(order=n, discount=discount, vocabulary=vocab)
elif model == 'WBI':
lm = MWittenBellInterpolated(order=n, vocabulary=vocab)
elif model == 'Lidstone':
lm = MLidstone(gamma=a, order=n, vocabulary=vocab)
lm.fit(training_ngrams)
lms += [lm]
return lms
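# Hedged usage sketch for the trainer above, on a made-up, pre-tokenized
# corpus (the model-name strings must match the branches above):
#
#     toy = [["the", "cat", "sat"], ["the", "cat", "ran"]]
#     wb_lm, lid_lm = train_ngram_lm(toy, models=["WBI", "Lidstone"],
#                                    n=3, a=0.5, unk_cutoff=1)
#     print(lid_lm.logscore("sat", ("the", "cat")))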
# ngram Tokenizer
class ngramTokenizer():
def __init__(self, lm):
self.bos_token = '<s>'
self.eos_token = '</s>'
self.unk_token = lm.vocab.unk_label
self.order = lm.order
def encode(self, sentence):
return tuple(pad_both_ends(tuple(map(str.lower, word_tokenize(sentence))), self.order))
def decode(self, sentence):
detokenize = TreebankWordDetokenizer().detokenize
content = []
for token in sentence:
if token == self.bos_token:
continue
if token == self.eos_token:
break
content.append(token)
return detokenize(content)
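# Hedged round-trip sketch (assumes `lm` is a trigram model returned by
# train_ngram_lm above):
#
#     tok = ngramTokenizer(lm)
#     enc = tok.encode("The cat sat")
#     # -> ('<s>', '<s>', 'the', 'cat', 'sat', '</s>', '</s>')
#     print(tok.decode(enc))   # -> "the cat sat"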
# Noisy Channel Model: beam search, Viterbi, a Poisson channel model, and a channel model with probabilities inversely proportional to edit distance
class NoisyChannelModel():
def __init__(self, lm, max_ed=4, prefix_length=7, l=1, channel_method_poisson=True, channel_prob_param=0.02):
self.show_progress = False
self.lm = lm
self.l = l
self.channel_method_poisson = channel_method_poisson
self.channel_prob_param = channel_prob_param
self.sym_spell = SymSpell(max_ed, prefix_length)
if isinstance(self.lm, GPT2LMHeadModel):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.lm_sent_logscore = self.gpt2_sent_logscore
self.beam_init = self.beam_GPT_init
self.skipstart = 1
self.skipend = -1
self.update_sentence_history = self.updateGPT2history
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
for subword in range(self.tokenizer.vocab_size):
self.sym_spell.create_dictionary_entry(key=self.tokenizer.decode(subword), count=1)
else:
self.lm_sent_logscore = self.ngram_sent_logscore
self.beam_init = self.beam_ngram_init
self.skipstart = self.lm.order-1
self.skipend = None
self.update_sentence_history = self.updatengramhistory
self.tokenizer = ngramTokenizer(self.lm)
for word in lm.vocab:
self.sym_spell.create_dictionary_entry(key=word, count=self.lm.counts[word])
def GPTrun(self, indexed_tokens, past=None):
tokens_tensor = torch.tensor([indexed_tokens])
tokens_tensor = tokens_tensor.to(self.device)
with torch.no_grad():
return self.lm(tokens_tensor, past=past, labels=tokens_tensor)
def gpt2_sent_logscore(self, sentence):
loss, next_loss = self.sentence_history[sentence[:self.pos]]
return loss + next_loss[sentence[self.pos]]
def gpt2_nohist_sent_logscore(self, sentence):
loss, prediction_scores, past = self.GPTrun(sentence)
return np.array(-(loss.cpu()))/np.log(2)
def updateGPT2history(self):
if self.pos > 1:
for sentence in tuple(self.suggestion_sentences):
formed_sentence = sentence[:self.pos]
loss, prediction_scores, past = self.GPTrun(formed_sentence)
next_loss = prediction_scores[0, -1].cpu().detach().numpy()
self.sentence_history[formed_sentence] = (np.array(-(loss.cpu()))/np.log(2), np.log2(softmax(next_loss)))
else:
formed_sentence = torch.tensor([self.tokenizer.bos_token_id]).to(self.device)
prediction_scores, past = self.lm(formed_sentence)
formed_sentence = tuple([formed_sentence.item()])
next_loss = prediction_scores[0].cpu().detach().numpy()
loss = np.array(0)
self.sentence_history[formed_sentence] = (loss, np.log2(softmax(next_loss)))
def ngram_sent_logscore(self, sentence):
qs = []
for ngram in ngrams(sentence, self.lm.order):
q = (ngram[-1], ngram[:-1])
if q not in self.logscoredb:
self.logscoredb[q] = self.lm.logscore(*q)
qs += [q]
return np.array([self.logscoredb[q] for q in qs]).sum()
def updatengramhistory(self):
return None
def channel_probabilities(self):
eds = np.array([candidate.distance for candidate in self.candidates])
logprobs = self.poisson_channel_model(eds) if self.channel_method_poisson else self.inv_prop_channel_model(eds)
self.channel_logprobs = {candidate.term: logprob for candidate, logprob in zip(self.candidates, logprobs)}
def poisson_channel_model(self, eds):
for ed in eds:
if ed not in self.poisson_probsdb:
self.poisson_probsdb[ed] = np.log2(poisson.pmf(k=ed, mu=self.channel_prob_param))
return np.array([self.poisson_probsdb[ed] for ed in eds])
def inv_prop_channel_model(self, eds):
inv_eds = np.reciprocal(eds.astype(float), where=eds!=0)
inv_eds[inv_eds < 1e-100] = 0.
probs = (1-self.channel_prob_param)/inv_eds.sum() * inv_eds
return np.log2(np.where(probs == 0., self.channel_prob_param, probs))
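# Hedged worked numbers for the Poisson channel above: with the Poisson
# mean channel_prob_param = 0.02,
#     log2 poisson.pmf(0, 0.02) ~ -0.029   (no edit)
#     log2 poisson.pmf(1, 0.02) ~ -5.67    (one edit)
# so candidates at larger edit distance are penalized sharply.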
def generate_suggestion_sentences(self):
new_suggestion_sentences = {}
self.update_sentence_history()
for changed_word in tuple(self.channel_logprobs):
if self.channel_logprobs[changed_word] != 0:
for sentence in tuple(self.suggestion_sentences):
new_sentence = list(sentence)
new_sentence[self.pos] = changed_word
new_sentence = tuple(new_sentence)
new_suggestion_sentences[new_sentence] = self.lm_sent_logscore(new_sentence) * self.l + self.channel_logprobs[changed_word]
self.suggestion_sentences.update(new_suggestion_sentences)
def beam_all_init(self, input_sentence):
self.logscoredb = {}
self.poisson_probsdb = {}
self.channel_logprobs = None
self.suggestion_sentences = None
self.candidates = None
self.pos = 0
if self.channel_method_poisson:
chan_prob = np.log2(poisson.pmf(k=0, mu=self.channel_prob_param))
else:
chan_prob = np.log2(self.channel_prob_param)
return self.beam_init(input_sentence, chan_prob)
def beam_GPT_init(self, input_sentence, chan_prob):
self.sentence_history = {}
observed_sentence = tuple(self.tokenizer.encode(self.tokenizer.bos_token + input_sentence + self.tokenizer.eos_token))
self.suggestion_sentences = {observed_sentence: self.gpt2_nohist_sent_logscore(observed_sentence) * self.l + chan_prob}
return observed_sentence
def beam_ngram_init(self, input_sentence, chan_prob):
observed_sentence = self.tokenizer.encode(input_sentence)
self.suggestion_sentences = {observed_sentence: self.lm_sent_logscore(observed_sentence) * self.l + chan_prob}
return observed_sentence
def beam_search(self, input_sentence, beam_width=10, max_ed=3, candidates_cutoff=50):
observed_sentence = self.beam_all_init(input_sentence)
for e, observed_word in enumerate(observed_sentence[self.skipstart:self.skipend]):
self.pos = e + self.skipstart
lookup_word = self.tokenizer.decode(observed_word) if isinstance(self.lm, GPT2LMHeadModel) else observed_word
if lookup_word == ' ':
continue
self.candidates = self.sym_spell.lookup(lookup_word, Verbosity.ALL, max_ed)[:candidates_cutoff]
if isinstance(self.lm, GPT2LMHeadModel):
for candidate in self.candidates:
candidate.term = self.tokenizer.encode(candidate.term)[0]
self.channel_probabilities()
self.generate_suggestion_sentences()
self.suggestion_sentences = dict(sorted(self.suggestion_sentences.items(), key = lambda kv:(kv[1], kv[0]), reverse=True)[:beam_width])
if isinstance(self.lm, GPT2LMHeadModel):
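# The slice [13:-13] strips the '<|endoftext|>' markers (13 characters each)
# that were added around the sentence in beam_GPT_init.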
return {self.tokenizer.decode(sentence)[13:-13]: np.power(2, self.suggestion_sentences[sentence]) for sentence in self.suggestion_sentences}
else:
return {self.tokenizer.decode(sentence): np.power(2, self.suggestion_sentences[sentence]) for sentence in self.suggestion_sentences}
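# Hedged usage sketch for the corrector (assumes `lm` is an n-gram model
# trained as above; the misspelled sentence is made up):
#
#     ncm = NoisyChannelModel(lm, channel_method_poisson=True,
#                             channel_prob_param=0.02)
#     suggestions = ncm.beam_search("the cat sta on the mat", beam_width=10)
#     best = max(suggestions, key=suggestions.get)
#     print(best, suggestions[best])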
def beam_search_sentences(self, sentences):
iterate = tqdm(sentences) if self.show_progress else sentences
df = | pd.DataFrame() | pandas.DataFrame |
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
# #find parent directory and import model
# parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# sys.path.append(parent_dir)
from ..trex_exe import Trex
test = {}
class TestTrex(unittest.TestCase):
"""
Unit tests for T-Rex model.
"""
print("trex unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for trex unit tests.
:return:
"""
pass
# setup the test as needed
# e.g. pandas to open trex qaqc csv
# Read qaqc csv and create pandas DataFrames for inputs and expected outputs
def tearDown(self):
"""
Teardown routine for trex unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_trex_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty trex object
trex_empty = Trex(df_empty, df_empty)
return trex_empty
def test_app_rate_parsing(self):
"""
unittest for function app_rate_testing:
method extracts 1st and maximum from each list in a series of lists of app rates
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([], dtype="object")
result = pd.Series([], dtype="object")
expected_results = [[0.34, 0.78, 2.34], [0.34, 3.54, 2.34]]
try:
trex_empty.app_rates = pd.Series([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]], dtype='object')
# trex_empty.app_rates = ([[0.34], [0.78, 3.54], [2.34, 1.384, 2.22]])
# parse app_rates Series of lists
trex_empty.app_rate_parsing()
result = [trex_empty.first_app_rate, trex_empty.max_app_rate]
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial(self):
"""
unittest for function conc_initial:
conc_0 = (app_rate * self.frac_act_ing * food_multiplier)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [12.7160, 9.8280, 11.2320]
try:
# specify an app_rates Series (that is a series of lists, each list representing
# a set of application rates for 'a' model simulation)
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.food_multiplier_init_sg = pd.Series([110., 15., 240.], dtype='float')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
for i in range(len(trex_empty.frac_act_ing)):
result[i] = trex_empty.conc_initial(i, trex_empty.app_rates[i][0], trex_empty.food_multiplier_init_sg[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
unittest for function conc_timestep:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = [6.25e-5, 0.039685, 7.8886e-30]
try:
trex_empty.foliar_diss_hlife = pd.Series([.25, 0.75, 0.01], dtype='float')
conc_0 = pd.Series([0.001, 0.1, 10.0])
for i in range(len(conc_0)):
result[i] = trex_empty.conc_timestep(i, conc_0[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_percent_to_frac(self):
"""
unittest for function percent_to_frac:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([.04556, .1034, .9389], dtype='float')
try:
trex_empty.percent_incorp = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.percent_to_frac(trex_empty.percent_incorp)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_inches_to_feet(self):
"""
unittest for function inches_to_feet:
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([0.37966, 0.86166, 7.82416], dtype='float')
try:
trex_empty.bandwidth = pd.Series([4.556, 10.34, 93.89], dtype='float')
result = trex_empty.inches_to_feet(trex_empty.bandwidth)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird(self):
"""
unittest for function at_bird:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
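# Hedged worked check of expected_results[0] above: with ld50_bird = 100,
# aw_bird_sm = 15, tw_bird_ld50 = 175 and mineau_sca_fact = 1.15 (set below),
#     100 * (15 / 175) ** (1.15 - 1) = 100 * 0.0857 ** 0.15 ~ 69.176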
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
# following variable is unique to at_bird and is thus sent via arg list
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.aw_bird_sm)):
result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_bird1(self):
"""
unittest for function at_bird1; alternative approach using more vectorization:
adjusted_toxicity = self.ld50_bird * (aw_bird / self.tw_bird_ld50) ** (self.mineau_sca_fact - 1)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([69.17640, 146.8274, 56.00997], dtype='float')
try:
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
# for i in range(len(trex_empty.aw_bird_sm)):
# result[i] = trex_empty.at_bird(i, trex_empty.aw_bird_sm[i])
result = trex_empty.at_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_bird(self):
"""
unittest for function fi_bird:
food_intake = (0.648 * (aw_bird ** 0.651)) / (1 - mf_w_bird)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.19728, 22.7780, 59.31724], dtype='float')
try:
#?? 'mf_w_bird_1' is a constant (i.e., not an input whose value changes per model simulation run); thus it should
#?? be specified here as a constant and not a pd.series -- if this is correct then go ahead and change next line
trex_empty.mf_w_bird_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_bird(trex_empty.aw_bird_sm, trex_empty.mf_w_bird_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sc_bird(self):
"""
unittest for function sc_bird:
m_s_a_r = ((self.app_rate * self.frac_act_ing) / 128) * self.density * 10000 # maximum seed application rate=application rate*10000
risk_quotient = m_s_a_r / self.noaec_bird
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([6.637969, 77.805, 34.96289, np.nan], dtype='float')
try:
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4], [3.]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.frac_act_ing = pd.Series([0.15, 0.20, 0.34, np.nan], dtype='float')
trex_empty.density = pd.Series([8.33, 7.98, 6.75, np.nan], dtype='float')
trex_empty.noaec_bird = pd.Series([5., 1.25, 12., np.nan], dtype='float')
result = trex_empty.sc_bird()
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_sa_bird_1(self):
"""
# unit test for function sa_bird_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm = pd.Series([0.228229, 0.704098, 0.145205], dtype = 'float')
expected_results_md = pd.Series([0.126646, 0.540822, 0.052285], dtype = 'float')
expected_results_lg = pd.Series([0.037707, 0.269804, 0.01199], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='float')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
#reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_bird_2(self):
"""
# unit test for function sa_bird_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.018832, 0.029030, 0.010483], dtype = 'float')
expected_results_md = pd.Series([2.774856e-3, 6.945353e-3, 1.453192e-3], dtype = 'float')
expected_results_lg =pd.Series([2.001591e-4, 8.602729e-4, 8.66163e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_bird_md = pd.Series([115., 120., 130.], dtype='float')
trex_empty.aw_bird_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.nagy_bird_coef_sm = 0.02
trex_empty.nagy_bird_coef_md = 0.1
trex_empty.nagy_bird_coef_lg = 1.0
result_sm = trex_empty.sa_bird_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_bird_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_bird_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_1(self):
"""
# unit test for function sa_mamm_1
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([0.022593, 0.555799, 0.010178], dtype = 'float')
expected_results_md = pd.Series([0.019298, 0.460911, 0.00376], dtype = 'float')
expected_results_lg =pd.Series([0.010471, 0.204631, 0.002715], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_bird_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_1("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_1("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_1("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sa_mamm_2(self):
"""
# unit test for function sa_mamm_2
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.46206e-3, 3.103179e-2, 1.03076e-3], dtype = 'float')
expected_results_md = pd.Series([1.304116e-3, 1.628829e-2, 4.220702e-4], dtype = 'float')
expected_results_lg =pd.Series([1.0592147e-4, 1.24391489e-3, 3.74263186e-5], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
trex_empty.max_seed_rate = pd.Series([33.19, 20.0, 45.6])
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.ld50_mamm = pd.Series([321., 100., 400.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sa_mamm_2("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sa_mamm_2("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sa_mamm_2("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_sc_mamm(self):
"""
# unit test for function sc_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result_sm = pd.Series([], dtype = 'float')
result_md = pd.Series([], dtype = 'float')
result_lg = pd.Series([], dtype = 'float')
expected_results_sm =pd.Series([2.90089, 15.87995, 8.142130], dtype = 'float')
expected_results_md = pd.Series([2.477926, 13.16889, 3.008207], dtype = 'float')
expected_results_lg =pd.Series([1.344461, 5.846592, 2.172211], dtype = 'float')
try:
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'first_app_rate' per model simulation run
trex_empty.density = pd.Series([8.33, 7.98, 6.75], dtype='float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.tw_mamm = pd.Series([350., 225., 390.], dtype='float')
trex_empty.noael_mamm = pd.Series([2.5, 3.5, 0.5], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.aw_mamm_md = pd.Series([35., 45., 25.], dtype='float')
trex_empty.aw_mamm_lg = pd.Series([1015., 1020., 1030.], dtype='float')
# reiterate constants here (they have been set in 'trex_inputs'; repeated here for clarity)
trex_empty.mf_w_mamm_1 = 0.1
trex_empty.nagy_mamm_coef_sm = 0.015
trex_empty.nagy_mamm_coef_md = 0.035
trex_empty.nagy_mamm_coef_lg = 1.0
result_sm = trex_empty.sc_mamm("small")
npt.assert_allclose(result_sm,expected_results_sm,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_md = trex_empty.sc_mamm("medium")
npt.assert_allclose(result_md,expected_results_md,rtol=1e-4, atol=0, err_msg='', verbose=True)
result_lg = trex_empty.sc_mamm("large")
npt.assert_allclose(result_lg,expected_results_lg,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab_sm = [result_sm, expected_results_sm]
tab_md = [result_md, expected_results_md]
tab_lg = [result_lg, expected_results_lg]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab_sm, headers='keys', tablefmt='rst'))
print(tabulate(tab_md, headers='keys', tablefmt='rst'))
print(tabulate(tab_lg, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird(self):
"""
# unit test for function ld50_rg_bird (LD50ft-2 for Row/Band/In-furrow granular birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_bird1(self):
"""
# unit test for function ld50_rg_bird1 (LD50ft-2 for Row/Band/In-furrow granular birds)
this is a duplicate of the 'test_ld50_rg_bird' method using a more vectorized approach to the
calculations; if desired other routines could be modified similarly
--comparing this method with 'test_ld50_rg_bird' it appears (for this test) that both run in about the same time
--but I don't think this would be the case when 100's of model simulation runs are executed (and only a small
--number of the application_types apply to this method); thus I conclude we continue to use the non-vectorized
--approach -- should be revisited when we have a large run to execute
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([346.4856, 25.94132, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.app_rate_parsing() #get 'max app rate' per model simulation run
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_bird1(trex_empty.aw_bird_sm)
npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, equal_nan=True, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_bird(self):
"""
# unit test for function ld50_bl_bird (LD50ft-2 for broadcast liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, 33.77777, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_bird
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_bird(self):
"""
# unit test for function ld50_bg_bird (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([46.19808, np.nan, 0.4214033], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Liquid',
'Broadcast-Granular'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_bird(self):
"""
# unit test for function ld50_rl_bird (LD50ft-2 for Row/Band/In-furrow liquid birds)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 2.20701, 0.0363297], dtype='float')
try:
# following parameter values are unique for ld50_bg_bird
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
# following parameter values are needed for internal call to "test_at_bird"
# results from "test_at_bird" test using these values are [69.17640, 146.8274, 56.00997]
trex_empty.ld50_bird = pd.Series([100., 125., 90.], dtype='float')
trex_empty.tw_bird_ld50 = pd.Series([175., 100., 200.], dtype='float')
trex_empty.mineau_sca_fact = pd.Series([1.15, 0.9, 1.25], dtype='float')
trex_empty.aw_bird_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rl_bird(trex_empty.aw_bird_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_at_mamm(self):
"""
unittest for function at_mamm:
adjusted_toxicity = self.ld50_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
result = pd.Series([], dtype = 'float')
expected_results = pd.Series([705.5036, 529.5517, 830.6143], dtype='float')
try:
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
for i in range(len(trex_empty.ld50_mamm)):
result[i] = trex_empty.at_mamm(i, trex_empty.aw_mamm_sm[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_anoael_mamm(self):
"""
unittest for function anoael_mamm:
adjusted_toxicity = self.noael_mamm * ((self.tw_mamm / aw_mamm) ** 0.25)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([5.49457, 9.62821, 2.403398], dtype='float')
try:
trex_empty.noael_mamm = pd.Series([2.5, 5.0, 1.25], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.anoael_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_fi_mamm(self):
"""
unittest for function fi_mamm:
food_intake = (0.621 * (aw_mamm ** 0.564)) / (1 - mf_w_mamm)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([3.17807, 16.8206, 42.28516], dtype='float')
try:
trex_empty.mf_w_mamm_1 = pd.Series([0.1, 0.8, 0.9], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.fi_mamm(trex_empty.aw_mamm_sm, trex_empty.mf_w_mamm_1)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bl_mamm(self):
"""
# unit test for function ld50_bl_mamm (LD50ft-2 for broadcast liquid)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Liquid', 'Broadcast-Liquid',
'Non-Broadcast'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "test_at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='',
verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_bg_mamm(self):
"""
# unit test for function ld50_bg_mamm (LD50ft-2 for broadcast granular)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([4.52983, 9.36547, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular', 'Broadcast-Granular',
'Broadcast-Liquid'], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_bg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rl_mamm(self):
"""
# unit test for function ld50_rl_mamm (LD50ft-2 for Row/Band/In-furrow liquid mammals)
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([np.nan, 0.6119317, 0.0024497], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Broadcast-Granular',
'Row/Band/In-furrow-Liquid',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
result = trex_empty.ld50_rl_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_ld50_rg_mamm(self):
"""
# unit test for function ld50_rg_mamm
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([33.9737, 7.192681, np.nan], dtype='float')
try:
# following parameter values are unique for ld50_bl_mamm
trex_empty.application_type = pd.Series(['Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Granular',
'Row/Band/In-furrow-Liquid',], dtype='object')
trex_empty.app_rates = pd.Series([[0.34, 1.384, 13.54], [0.78, 11.34, 3.54],
[2.34, 1.384, 3.4]], dtype='object')
trex_empty.frac_act_ing = pd.Series([0.34, 0.84, 0.02], dtype='float')
trex_empty.frac_incorp = pd.Series([0.25, 0.76, 0.05], dtype= 'float')
trex_empty.bandwidth = pd.Series([2., 10., 30.], dtype = 'float')
trex_empty.row_spacing = pd.Series([20., 32., 50.], dtype = 'float')
# following parameter values are needed for internal call to "at_mamm"
# results from "test_at_mamm" test using these values are [705.5036, 529.5517, 830.6143]
trex_empty.ld50_mamm = pd.Series([321., 275., 432.], dtype='float')
trex_empty.tw_mamm = pd.Series([350., 275., 410.], dtype='float')
trex_empty.aw_mamm_sm = pd.Series([15., 20., 30.], dtype='float')
result = trex_empty.ld50_rg_mamm(trex_empty.aw_mamm_sm)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0,
err_msg='', verbose=True, equal_nan=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_eec_diet_max(self):
"""
combined unit test for methods eec_diet_max & eec_diet_timeseries;
* this test calls eec_diet_max, which in turn calls eec_diet_timeseries (which produces
concentration timeseries), which in turn calls conc_initial and conc_timestep
* eec_diet_max processes the timeseries and extracts the maximum values
* this test tests both eec_diet_max & eec_diet_timeseries together (ok, so this violates the exact definition
* of 'unittest', get over it)
* the assertion check is that the maximum values from the timeseries match expectations
* this assumes that for the maximums to be 'as expected' then the timeseries are as well
* note: the 1st application day ('day_out') for the 2nd model simulation run is set to 0 here
* to make sure the timeseries processing works when an application occurs on 1st day of year
"""
# create empty pandas dataframes to create empty object for this unittest
trex_empty = self.create_trex_object()
expected_results = pd.Series([1.734, 145.3409, 0.702], dtype='float')
num_app_days = pd.Series([], dtype='int')
try:
#specifying 3 different application scenarios of 1, 4, and 2 applications
trex_empty.app_rates = pd.Series([[0.34], [0.78, 11.34, 3.54, 1.54], [2.34, 1.384]], dtype='object')
trex_empty.day_out = pd.Series([[5], [0, 10, 20, 50], [150, 250]], dtype='object')
for i in range(len(trex_empty.app_rates)):
trex_empty.num_apps[i] = len(trex_empty.app_rates[i])
num_app_days[i] = len(trex_empty.day_out[i])
assert (trex_empty.num_apps[i] == num_app_days[i]), 'series of app-rates and app_days do not match'
trex_empty.frac_act_ing = | pd.Series([0.34, 0.84, 0.02]) | pandas.Series |
import logging
import os
import sys
import pandas as pd
import lightkurve as lk
import foldedleastsquares as tls
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.wcs import WCS
from astroquery.mast import Catalogs, Tesscut
from sherlockpipe.ois.OisManager import OisManager
from lcbuilder.objectinfo.MissionFfiIdObjectInfo import MissionFfiIdObjectInfo
from lcbuilder.objectinfo.preparer.MissionFfiLightcurveBuilder import MissionFfiLightcurveBuilder
from lcbuilder.objectinfo.MissionObjectInfo import MissionObjectInfo
from lcbuilder.objectinfo.preparer.MissionLightcurveBuilder import MissionLightcurveBuilder
from lcbuilder.eleanor import TargetData
from lcbuilder import eleanor
from lcbuilder.star.TicStarCatalog import TicStarCatalog
import numpy as np
import tsfresh
from tsfresh.utilities.dataframe_functions import impute
def download_neighbours(ID: int, sectors: np.ndarray, search_radius: int = 10):
"""
Queries TIC for sources near the target and obtains a cutout
of the pixels enclosing the target.
Args:
ID (int): TIC ID of the target.
sectors (numpy array): Sectors in which the target
has been observed.
search_radius (int): Number of pixels from the target
star to search.
"""
N_pix = 2 * search_radius + 2
# query TIC for nearby stars
pixel_size = 20.25 * u.arcsec
df = Catalogs.query_object(
str(ID),
radius=search_radius * pixel_size,
catalog="TIC"
)
new_df = df[
"ID", "Tmag", "ra", "dec", "mass", "rad", "Teff", "plx"
]
stars = new_df.to_pandas()
TESS_images = []
col0s, row0s = [], []
pix_coords = []
# for each sector, get FFI cutout and transform RA/Dec into
# TESS pixel coordinates
for j, sector in enumerate(sectors):
Tmag = stars["Tmag"].values
ra = stars["ra"].values
dec = stars["dec"].values
cutout_coord = SkyCoord(ra[0], dec[0], unit="deg")
cutout_hdu = Tesscut.get_cutouts(cutout_coord, size=N_pix, sector=sector)[0]
cutout_table = cutout_hdu[1].data
hdu = cutout_hdu[2].header
wcs = WCS(hdu)
TESS_images.append(np.mean(cutout_table["FLUX"], axis=0))
col0 = cutout_hdu[1].header["1CRV4P"]
row0 = cutout_hdu[1].header["2CRV4P"]
col0s.append(col0)
row0s.append(row0)
pix_coord = np.zeros([len(ra), 2])
for i in range(len(ra)):
RApix = np.asscalar(
wcs.all_world2pix(ra[i], dec[i], 0)[0]
)
Decpix = np.asscalar(
wcs.all_world2pix(ra[i], dec[i], 0)[1]
)
pix_coord[i, 0] = col0 + RApix
pix_coord[i, 1] = row0 + Decpix
pix_coords.append(pix_coord)
# for each star, get the separation and position angle
# from the target star
sep = [0]
pa = [0]
c_target = SkyCoord(
stars["ra"].values[0],
stars["dec"].values[0],
unit="deg"
)
for i in range(1, len(stars)):
c_star = SkyCoord(
stars["ra"].values[i],
stars["dec"].values[i],
unit="deg"
)
sep.append(
np.round(
c_star.separation(c_target).to(u.arcsec).value,
3
)
)
pa.append(
np.round(
c_star.position_angle(c_target).to(u.deg).value,
3
)
)
stars["sep (arcsec)"] = sep
stars["PA (E of N)"] = pa
return stars
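# Example call (hypothetical sector list, shown only for illustration):
# neighbour_stars = download_neighbours(251848941, np.array([14, 15]))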
dir = "training_data/"
if not os.path.exists(dir):
os.mkdir(dir)
file_dir = dir + "/ml.log"
if os.path.exists(file_dir):
os.remove(file_dir)
formatter = logging.Formatter('%(message)s')
logger = logging.getLogger()
while len(logger.handlers) > 0:
logger.handlers.pop()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = logging.FileHandler(file_dir)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
logger.addHandler(handler)
positive_dir = dir + "/tp/"
negative_dir = dir + "/ntp/"
if not os.path.isdir(dir):
os.mkdir(dir)
if not os.path.isdir(positive_dir):
os.mkdir(positive_dir)
if not os.path.isdir(negative_dir):
os.mkdir(negative_dir)
tp_tic_list = [251848941]
ois = OisManager().load_ois()
ois = ois[(ois["Disposition"] == "CP") | (ois["Disposition"] == "KP")]
mission_lightcurve_builder = MissionLightcurveBuilder()
mission_ffi_lightcurve_builder = MissionFfiLightcurveBuilder()
# TODO fill excluded_ois from given csv file
excluded_ois = {}
# TODO fill additional_ois from given csv file with their ephemeris
additional_ois_df = pd.DataFrame(columns=['Object Id', 'name', 'period', 'period_err', 't0', 'to_err', 'depth',
'depth_err', 'duration', 'duration_err'])
failed_targets_df = pd.DataFrame(columns=["Object Id"])
for tic in ois["Object Id"].unique():
tic_id = str(tic)
target_dir = positive_dir + tic_id + "/"
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
lc_short = None
try:
logging.info("Trying to get short cadence info for " + tic)
lcbuild_short = \
mission_lightcurve_builder.build(MissionObjectInfo(tic_id, 'all'), None)
lc_short = lcbuild_short.lc
lc_data = lcbuild_short.lc_data
lc_data["centroids_x"] = lc_data["centroids_x"] - np.nanmedian(lc_data["centroids_x"])
lc_data["centroids_y"] = lc_data["centroids_y"] - np.nanmedian(lc_data["centroids_y"])
lc_data["motion_x"] = lc_data["motion_x"] - np.nanmedian(lc_data["motion_x"])
lc_data["motion_y"] = lc_data["motion_y"] - np.nanmedian(lc_data["motion_y"])
lc_data.to_csv(target_dir + "time_series_short.csv")
tpf_short = lk.search_targetpixelfile(tic_id, cadence="short", author="spoc").download_all()
short_periodogram = lc_short.to_periodogram(oversample_factor=5)
periodogram_df = pd.DataFrame(columns=['period', 'power'])
periodogram_df["period"] = short_periodogram.period.value
periodogram_df["power"] = short_periodogram.power.value
periodogram_df.to_csv(target_dir + "periodogram_short.csv")
except Exception as e:
logging.warning("No Short Cadence data for target " + tic)
logging.exception(e)
logging.info("Trying to get long cadence info for " + tic)
try:
lcbuild_long = \
mission_ffi_lightcurve_builder.build(MissionFfiIdObjectInfo(tic_id, 'all'), None)
sectors = lcbuild_long.sectors
lc_long = lcbuild_long.lc
lc_data = lcbuild_long.lc_data
lc_data["centroids_x"] = lc_data["centroids_x"] - np.nanmedian(lc_data["centroids_x"])
lc_data["centroids_y"] = lc_data["centroids_y"] - np.nanmedian(lc_data["centroids_y"])
lc_data["motion_x"] = lc_data["motion_x"] - np.nanmedian(lc_data["motion_x"])
lc_data["motion_y"] = lc_data["motion_y"] - np.nanmedian(lc_data["motion_y"])
lc_data.to_csv(target_dir + "time_series_long.csv")
lcf_long = lc_long.remove_nans()
tpf_long = lk.search_targetpixelfile(tic_id, cadence="long", author="spoc").download_all()
# TODO somehow store tpfs images
long_periodogram = lc_long.to_periodogram(oversample_factor=5)
periodogram_df = pd.DataFrame(columns=['period', 'power'])
periodogram_df["period"] = long_periodogram.period.value
periodogram_df["power"] = long_periodogram.power.value
periodogram_df.to_csv(target_dir + "periodogram_long.csv")
logging.info("Downloading neighbour stars for " + tic)
stars = download_neighbours(tic, sectors)
# TODO get neighbours light curves
logging.info("Classifying candidate points for " + tic)
target_ois = ois[ois["Object Id"] == tic_id]
target_ois = target_ois[(target_ois["Disposition"] == "CP") | (target_ois["Disposition"] == "KP")]
target_ois_df = pd.DataFrame(columns=['id', 'name', 'period', 'period_err', 't0', 'to_err', 'depth', 'depth_err', 'duration', 'duration_err'])
tags_series_short = np.full(len(lc_short.time), "BL")
tags_series_long = np.full(len(lc_long.time), "BL")
for index, row in target_ois.iterrows():
if row["OI"] not in excluded_ois:
logging.info("Classifying candidate points with OI %s, period %s, t0 %s and duration %s for " + tic,
row["OI"], row["Period (days)"], row["Epoch (BJD)"], row["Duration (hours)"])
target_ois_df = target_ois_df.append({"id": row["Object Id"], "name": row["OI"], "period": row["Period (days)"],
"period_err": row["Period (days) err"], "t0": row["Epoch (BJD)"] - 2457000.0,
"to_err": row["Epoch (BJD) err"], "depth": row["Depth (ppm)"],
"depth_err": row["Depth (ppm) err"], "duration": row["Duration (hours)"],
"duration_err": row["Duration (hours) err"]}, ignore_index=True)
if lc_short is not None:
mask_short = tls.transit_mask(lc_short.time.value, row["Period (days)"], row["Duration (hours)"] / 24, row["Epoch (BJD)"] - 2457000.0)
tags_series_short[mask_short] = "TP"
mask_long = tls.transit_mask(lc_long.time.value, row["Period (days)"], row["Duration (hours)"] / 24, row["Epoch (BJD)"] - 2457000.0)
tags_series_long[mask_long] = "TP"
target_additional_ois_df = additional_ois_df[additional_ois_df["Object Id"] == tic_id]
for index, row in target_additional_ois_df.iterrows():
if row["OI"] not in excluded_ois:
target_ois_df = target_ois_df.append({"id": row["Object Id"], "name": row["OI"], "period": row["Period (days)"],
"period_err": row["Period (days) err"], "t0": row["Epoch (BJD)"] - 2457000.0,
"to_err": row["Epoch (BJD) err"], "depth": row["Depth (ppm)"],
"depth_err": row["Depth (ppm) err"], "duration": row["Duration (hours)"],
"duration_err": row["Duration (hours) err"]}, ignore_index=True)
if lc_short is not None:
mask_short = tls.transit_mask(lc_short.time.value, row["Period (days)"], row["Duration (hours)"] / 24, row["Epoch (BJD)"] - 2457000.0)
tags_series_short[mask_short] = "TP"
mask_long = tls.transit_mask(lc_long.time.value, row["Period (days)"], row["Duration (hours)"] / 24, row["Epoch (BJD)"] - 2457000.0)
tags_series_long[mask_long] = "TP"
if lc_short is not None:
lc_classified_short = pd.DataFrame.from_dict({"time": lc_short.time.value, "flux": lc_short.flux.value, "tag": tags_series_short})
lc_classified_short.to_csv(target_dir + "lc_classified_short.csv")
lc_classified_long = pd.DataFrame.from_dict({"time": lc_long.time.value, "flux": lc_long.flux.value, "tag": tags_series_long})
lc_classified_long.to_csv(target_dir + "lc_classified_long.csv")
# TODO store folded light curves -with local and global views-(masking previous candidates?)
except Exception as e:
logging.exception(e)
failed_targets_df = failed_targets_df.append({"Object Id": tic_id}, ignore_index=True)
# write the failures next to the training data (output filename assumed here)
failed_targets_df.to_csv(dir + "failed_targets.csv", index=False)
tsfresh_short_df = pd.DataFrame(columns=['id', 'time', 'flux', 'flux_err', 'background_flux', 'quality', 'centroids_x',
'centroids_y', 'motion_x', 'motion_y'])
tsfresh_long_df = pd.DataFrame(columns=['id', 'time', 'flux', 'flux_err', 'background_flux', 'quality', 'centroids_x',
'centroids_y', 'motion_x', 'motion_y'])
tsfresh_tags_short = []
tsfresh_tags_long = []
for tic_dir in os.listdir(positive_dir):
short_lc_dir = positive_dir + "/" + tic_dir + "/time_series_short.csv"
if os.path.exists(short_lc_dir):
lc_short_df = pd.read_csv(positive_dir + "/" + tic_dir + "/time_series_short.csv")
lc_short_df['id'] = tic_dir
tsfresh_short_df = tsfresh_short_df.append(lc_short_df)
tsfresh_tags_short.append([tic_dir, 1])
lc_long_df = pd.read_csv(positive_dir + "/" + tic_dir + "/time_series_long.csv")
lc_long_df['id'] = tic_dir
tsfresh_long_df = tsfresh_long_df.append(lc_long_df)
tsfresh_tags_long.append([tic_dir, 1])
for tic_dir in os.listdir(negative_dir):
short_lc_dir = negative_dir + "/" + tic_dir + "/time_series_short.csv"
if os.path.exists(short_lc_dir):
lc_short_df = pd.read_csv(negative_dir + "/" + tic_dir + "/time_series_short.csv")
lc_short_df['id'] = tic_dir
tsfresh_short_df = tsfresh_short_df.append(lc_short_df)
tsfresh_tags_short.append([tic_dir, 0])  # non-transit (ntp) targets presumably get label 0; the original 1 looks like a copy-paste slip
lc_long_df = | pd.read_csv(negative_dir + "/" + tic_dir + "/time_series_long.csv") | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
#===========================================================================================================
Copyright 2006-2021 Paseman & Associates (www.paseman.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
#===========================================================================================================
compareYND() calculates monthlyAbsMom5 timing signal using 3 different inputs:
o monthlyAbsMom5Yahoo - uses SPY and IRX from Yahoo (If pandas_datareader is down, I use a cached file.)
o monthlyAbsMom5Norgate - uses SPY and IRX from Norgate (Thanks Don for these files)
o monthlyAbsMom5Don - uses SPY and IRX from other files supplied by Don
compareYND() runs all three, concatenates them and compares the pairwise Buy/Sell Signals between norgate/yahoo and norgate/don
Note that Norgate/Yahoo gives an error on 2001-05-31 with Yahoo raising a spurious(?) sell signal.
The reason is clear. Yahoo data shows a (slight) monthly decrease in SPY while Norgate/Don show a slight increase.
Note also the following discrepancies for 11/29/2019, the Friday after thanksgiving.
Don's Tbill File
11/29/2019 12:00 AM 13.8485032869658 13.8485032869658 13.8485032869658 13.8485032869658 0 13.8485032869658 13.8485032869658
Yahoo's IRX history - https://finance.yahoo.com/quote/%5EIRX/history?period1=1561852800&period2=1625011200&interval=1d&filter=history&frequency=1d&includeAdjustedClose=true
Nov 29, 2019 - - - - - -
Norgate's IRX history
20191129 1.553 1.553 1.54 1.54
So either my code samples the data incorrectly, or the data sources do not match.
Feedback appreciated.
"""
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 100)
pd.set_option('display.width', 350)
pd.set_option('display.max_rows', None)
#===========================================================================================================
def monthlyAbsMom5(df,overRideTbillEC=[]): # df has ['%IRX'] and ["SPY"]
"""Calculate monthlyAbsMom5() as per <NAME> May 2, 2021, 7:47 PM"""
# For Tbill I calculate an equity curve for it using the t-bill yield ^IRX.
# Daily return for date i is (1+^IRX(i)/100)^(1/252).
df['%IRXsmoothed']=(1 + df['%IRX']/100.0)**(1.0/252.0)
# Then I use those returns to create the TBILL EC.
# TBILL[0]=10 – an arbitrary starting value for the first date for ^IRX
df['%IRXsmoothed'].iloc[0]=10
# TBILL[i] = TBILL[i-1]*(1+^IRX[i-1]/100)^(1/252), for i=1-last ^IRX-1
df['tbillEC']=df['%IRXsmoothed'].cumprod()
if len(overRideTbillEC)>0: df['tbillEC']=overRideTbillEC
#print(df)
# For absm2M(5,1)(spy,tbill) I use month end prices – not 105 days back.
# df.fillna(0).resample('BM').last() includes invalid dates like 20020328 (Good Friday), 20040528, 20100528
# Instead of calendars, depend on the fact that SPY and IRX are only quoted on dates that market is open.
# https://stackoverflow.com/questions/48288059/how-to-get-last-day-of-each-month-in-pandas-dataframe-index-using-timegrouper
Mdf=df.loc[df.groupby(df.index.to_period('M')).apply(lambda x: x.index.max())]
# Then I compute absmM(5)(spy,tbill) = gainM5(Spy) – gainM5(Tbill)
# and similarly for absmM(1)(spy,tbill)=gainM1(spy)-gainM1(tbill).
Mdf['absmM(5)']=Mdf["SPY"].pct_change(periods=5)-Mdf["tbillEC"].pct_change(periods=5)
Mdf['absmM(1)']=Mdf["SPY"].pct_change(periods=1)-Mdf["tbillEC"].pct_change(periods=1)
# If either absmM(5)(spy,tbill) or absmM(1)(spy,tbill) is >=0, you are in equities.
# You need both negative to be in cash.
Mdf['SELL']=np.where(((Mdf['absmM(5)']<0.0) & (Mdf['absmM(1)']<0.0)), 'SELL','')
return Mdf['19991231':] # 19940331
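# Worked example of the daily T-bill factor used above (illustrative, not part of the original):
# with ^IRX = 1.5 (a 1.5% annualized yield) the daily factor is (1 + 1.5/100) ** (1/252) ~= 1.0000591,
# i.e. about 0.006% per trading day, and the cumulative product of these factors builds 'tbillEC'.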
#===========================================================================================================
def monthlyAbsMom5Norgate():
SPY = pd.read_csv("SPY-NorgateExtended.txt", header=0, sep='\t', index_col=0, parse_dates=True)#.fillna(0)
IRX = pd.read_csv("^IRX.txt", header=0, sep='\t', index_col=0, parse_dates=True)#.fillna(0)
IRX.rename(columns={"Close":'%IRX'}, inplace=True)
df=pd.concat([SPY["Close"],IRX['%IRX']], axis=1)
df.rename(columns={"Close":"SPY"}, inplace=True)
return monthlyAbsMom5(df)
#===========================================================================================================
import pandas_datareader as pdr
import datetime
def getTickerPriceColumnYahoo(ticker,columnName="Close"):
try:
start = datetime.datetime(1993, 1, 1)
end = datetime.datetime(2021, 7, 2)
df= pdr.get_data_yahoo(ticker, start, end)
except Exception as ex:
# https://365datascience.com/question/remotedataerror-unable-to-read-url-for-yahoo-using-pandas-datareader/
# as of 7/8/2021, get "Our engineers are working quickly to resolve the issue."
# So retrieve a cached version
print(ex.args)
df = pd.read_csv(ticker+".csv", header=0, index_col=0, parse_dates=True)
df.rename(columns={columnName:ticker}, inplace=True)
return df[ticker]
#===========================================================================================================
def monthlyAbsMom5Yahoo():
df=getTickerPriceColumnYahoo("SPY",columnName="Adj Close").to_frame()
df["%IRX"]=getTickerPriceColumnYahoo("^IRX")
return monthlyAbsMom5(df)
#===========================================================================================================
def monthlyAbsMom5Don():
df= | pd.read_csv("SPY-NorgateExtended.txt",header=0, sep='\t', index_col=0, parse_dates=True) | pandas.read_csv |
from os.path import expanduser
from text_extraction import *
import pandas as pd
import argparse
source_path = expanduser('~') + '/Downloads/kanika/source2'
parser = argparse.ArgumentParser()
parser.add_argument("--source_dir", help="1 This should be the source directory of files to be processed", type=str, required=False)
args = parser.parse_args()
def create_excel_file(file_source, tess_dir, excel_output_dir):
count = 1
f_list = os.listdir(file_source)
number_files = len(f_list)
data_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 23 14:48:18 2018
@author: RomanGutin
"""
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
### CrossValidation Score Functions###
def concat_train(x): #I wrote this function to convert the list of training dataframes into a single dataframe,
for j in range(len(x)): #Helper function in CrossValidation(.)
if j==0:
concat_set=[]
concat_set.append(x[j])
else:
concat_set= concat_set
concat_set.append(x[j])
train_data= | pd.concat(concat_set) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 29 17:53:37 2019
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
import os, gc
from glob import glob
from tqdm import tqdm
import sys
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
import utils
#utils.start(__file__)
# =============================================================================
#SUBMIT_FILE_PATH = '../output/0328-1.csv.gz'
#
#COMMENT = 'lgb shuffle row'
EXE_SUBMIT = True
NFOLD = 5
LOOP = 1
param = {
'objective': 'binary',
'metric': 'None',
'learning_rate': 0.1,
'max_depth': -1,
'num_leaves': 2**6 -1,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.5,
'subsample': 0.7,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
}
NROUND = 9999
ESR = 100
VERBOSE_EVAL = 50
SEED = np.random.randint(9999)
# =============================================================================
# load
# =============================================================================
X_train = | pd.read_csv('../input/train.csv.zip') | pandas.read_csv |
import pandas as pd
from sqlalchemy import create_engine
import sqlite3
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import bar_chart_race as bcr
import streamlit as st
import ffmpeg
import rpy2.robjects as ro
from math import pi
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
with st.echo(code_location="below"):
st.title('''
Spotify trends
''')
st.write('''
Good afternoon, colleague. Today we are going to work with the Spotify database hosted on Kaggle.
In this dashboard we will get to know the dataset itself and try to draw some conclusions about how music has evolved.
It would be great if you opened your favourite music service right now, put on your headphones and enjoyed the tracks
that will come up in this little study.)
''')
st.write('''
The dataset is too large, so I have attached my zip file with the datasets. It has to be placed in the same folder as demo_app.py.
If Heroku does not work, you can type streamlit run demo_app.py into a terminal opened for this file in PyCharm.
''')
st.write('''
To start with, I will do a little data 'cleaning'. Namely, I will remove live recordings by the artists so that things are a bit more convenient for us
and nothing can badly distort our data.
''')
spotify_track_data = pd.read_csv("tracks.csv")
spotify_track_data.head()
engine = create_engine('sqlite://', echo=False)
spotify_track_data.to_sql('tracks', con=engine)
engine.execute('''
select count (id)
from tracks
''').fetchall()
engine.execute('''
select count (id)
from tracks
where name like '%(Live%'
''').fetchall()
engine.execute('''
delete
from tracks
where name like '%(Live%'
''')
rows = engine.execute('''
select *
from tracks
''').fetchall()
spotify_track_data = pd.DataFrame(list(rows))
spotify_track_data.columns = ['index','id', 'name', 'popularity',
'duration_ms', 'explicit', 'artists',
'id_artists', 'release_date', 'danceability',
'energy', 'key', 'loudness',
'mode', 'speechiness', 'acousticness',
'instrumentalness', 'liveness', 'valence',
'tempo', 'time_signature']
spotify_track_data.artists = spotify_track_data.artists.replace('[]', np.nan)
spotify_track_data.release_date = pd.to_datetime(spotify_track_data.release_date)
spotify_track_data['year'] = ( | pd.to_datetime(spotify_track_data.release_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# Time series model
# forecast monthly births with xgboost
from numpy import asarray
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.metrics import mean_absolute_error
from xgboost import XGBRegressor
from matplotlib import pyplot
# transform a time series dataset into a supervised learning dataset
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols = list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
# put it all together
agg = concat(cols, axis=1)
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
return agg.values
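# Quick illustration (not part of the original script): with the defaults n_in=1, n_out=1,
# series_to_supervised([[1], [2], [3], [4]]) returns
# [[1., 2.], [2., 3.], [3., 4.]], i.e. each row is (value at t-1, value at t).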
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test, :], data[-n_test:, :]
# fit an xgboost model and make a one step prediction
def xgboost_forecast(train, testX):
# transform list into array
train = asarray(train)
# split into input and output columns
trainX, trainy = train[:, : -1], train[:, -1]
# fit model
model = XGBRegressor(objective='reg:squarederror', n_estimators=1000)
model.fit(trainX, trainy)
# make a one-step prediction
yhat = model.predict(asarray([testX]))
print(yhat)
return yhat[0]
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
print("train:", train)
print("test:", test)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# split test row into input and output columns
testX, testy = test[i, : -1], test[i, -1]
# fit model on history and make a prediction
yhat = xgboost_forecast(history, testX)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# summarize progress
print('>expected=%.1f,predicted=%.1f' % (testy, yhat))
# estimate prediction error
error = mean_absolute_error(test[:, -1], predictions)
return error, test[:, -1], predictions
if (__name__ == "__main__"):
# load the dataset, data url: https://raw.githubusercontent.com/jbrownlee/Datasets/master/daily-total-female-births.csv
# series =read_csv( 'daily-total-female-births.csv', header= 0, index_col= 0)
series = | read_csv('../data/per_month_sale_and_risk.csv') | pandas.read_csv |
import os
import collections
import pandas
import pandas as pd
import matplotlib, seaborn, numpy
from matplotlib import pyplot
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.model_selection import train_test_split
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier, LogisticRegression, LogisticRegressionCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import cross_val_score
import parse_bepipred
def count_aa(string):
total_counts = []
amino_acids = ['H', 'E', 'V', 'A', 'N', 'M', 'K', 'F', 'I', 'P', 'D', 'R', 'Y', 'T', 'S', 'W', 'G', 'C', 'L', 'Q']
for aa in amino_acids:
total_counts.append(string.lower().count(aa.lower()))
return total_counts
def get_vectorized_sequences(list_sequences):
vectorized_sequences = []
for num in range(len(list_sequences)):
sequence = list_sequences[num]
num_aa = count_aa(sequence)
#normalized_num_aa = [c/len(sequence) for c in num_aa]
#num_hydrophobic = []
#final_vector = normalized_num_aa# + [count_attribute(sequence, "charged")] + [count_attribute(sequence, "polar")] + [count_attribute(sequence, "nonpolar")]
#final_vector = [count_attribute(sequence, "charged")] + [count_attribute(sequence, "polar")] + [count_attribute(sequence, "nonpolar")]
vectorized_sequences.append(num_aa)
return vectorized_sequences
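# Illustrative example (not part of the original script):
# count_aa("HEVA") -> [1, 1, 1, 1, 0, 0, ..., 0], one slot per amino acid in the fixed
# 20-letter order defined in count_aa above; get_vectorized_sequences maps a list of
# sequences to a list of such count vectors.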
class Sequence():
def __init__(self, uniprot_id, start, end, sequence, num_hits):
self.uniprot_id = uniprot_id
self.start = start
self.end = end
self.sequence = sequence
self.num_hits = num_hits
class UniprotGroup():
def __init__(self, list_of_sequences):
self.list_of_sequences = list_of_sequences
sorted_seq = sorted(self.list_of_sequences, key=lambda sequence: sequence.start)
#print(sorted_seq)
i = 0
aa_sequence = ""
while i<len(list_of_sequences):
aa_sequence = aa_sequence + list_of_sequences[i].sequence
i = i+2
if (len(list_of_sequences) % 2 == 0):
index = int(list_of_sequences[-1].start) - int(list_of_sequences[-2].end)
aa_sequence = aa_sequence + list_of_sequences[-1].sequence[index:]
self.aa_sequence = aa_sequence
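# Note (added for clarity): this reconstruction assumes the peptides tile the protein with
# roughly 50% overlap, so concatenating every second peptide (plus, when the count is even,
# the tail of the final peptide beyond the previous included peptide's end) recovers the
# full amino-acid sequence.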
list_of_uniprot_ids = []
list_of_sequences = []
df = pandas.read_csv("{}/data/hits.binarized.fdr_0.15.w_metadata.csv".format(os.path.dirname(os.path.realpath(__file__))))
print(len(df.uniprot_accession))
for num in range(len(df.uniprot_accession)-4):
#for num in range(20000):
print(num)
#print(df.iloc[num])
uniprot_id = df.uniprot_accession[num]
sequence = df.sequence_aa[num]
start = df.peptide_position[num].split("-")[0]
end = df.peptide_position[num].split("-")[1]
list_of_uniprot_ids.append(uniprot_id)
num_hits = 0
for number in df.iloc[num][4:]:
num_hits = num_hits + int(number)
list_of_sequences.append(Sequence(uniprot_id, start, end, sequence, num_hits))
list_of_uniprot_ids = list(set(list_of_uniprot_ids))
list_of_uniprot_groups = []
for uniprot_id in list_of_uniprot_ids:
new_list_of_sequences = []
for seq in list_of_sequences:
if seq.uniprot_id == uniprot_id:
new_list_of_sequences.append(seq)
list_of_uniprot_groups.append(UniprotGroup(new_list_of_sequences))
summary_data = pd.DataFrame()
list_of_rows = []
for sequence in list_of_sequences:
row = [sequence.uniprot_id, sequence.start, sequence.end, sequence.sequence, sequence.num_hits]
list_of_rows.append(row)
df = | pd.DataFrame(list_of_rows,columns=['uniprot_id','start', 'end', 'sequence', 'num_hits']) | pandas.DataFrame |
from datetime import datetime
from io import StringIO
import itertools
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Period,
Series,
Timedelta,
date_range,
)
import pandas._testing as tm
class TestDataFrameReshape:
def test_stack_unstack(self, float_frame):
df = float_frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({"foo": stacked, "bar": stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
tm.assert_frame_equal(unstacked, df)
tm.assert_frame_equal(unstacked_df["bar"], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
tm.assert_frame_equal(unstacked_cols.T, df)
tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
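# Small illustration of the round trip exercised above (added for clarity, not part of the test):
# df = DataFrame([[0, 1], [2, 3]], index=["r0", "r1"], columns=["a", "b"])
# df.stack() gives a Series indexed by (row, column): (r0, a)=0, (r0, b)=1, (r1, a)=2, (r1, b)=3
# df.stack().unstack() recovers the original 2x2 frame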
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, "a", "b"], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
tm.assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(
1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
)
tm.assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[["a", "b"]].stack(1)
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self, using_array_manager):
# Gh#34708
df = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
df2 = df[["x"]]
df2["y"] = df["y"]
if not using_array_manager:
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack(fill_value=-1)
expected = DataFrame(
{"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
)
tm.assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame(
{"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
)
tm.assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame(
{"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
).set_index(["x", "y", "z"])
unstacked = df.unstack(["x", "y"], fill_value=0)
        key = ("w", "b", "j")
expected = unstacked[key]
result = Series([0, 0, 2], index=unstacked.index, name=key)
tm.assert_series_equal(result, expected)
stacked = unstacked.stack(["x", "y"])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
tm.assert_frame_equal(result, df)
# From a series
s = df["w"]
result = s.unstack(["x", "y"], fill_value=0)
expected = unstacked["w"]
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
# From a mixed type dataframe
df["A"] = df["A"].astype(np.int16)
df["B"] = df["B"].astype(np.float64)
result = df.unstack(fill_value=-1)
expected["A"] = expected["A"].astype(np.int16)
expected["B"] = expected["B"].astype(np.float64)
tm.assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list("xyz"), dtype=float)
expected.columns = MultiIndex.from_tuples(
[("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = Series(["a", "b", "c", "a"], dtype="category")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{
"a": pd.Categorical(list("axa"), categories=list("abc")),
"b": pd.Categorical(list("bcx"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
        # Fill with a value outside the categories raises a TypeError
msg = r"'fill_value=d' is not present in"
with pytest.raises(TypeError, match=msg):
data.unstack(fill_value="d")
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value="c")
expected = DataFrame(
{
"a": pd.Categorical(list("aca"), categories=list("abc")),
"b": pd.Categorical(list("bcc"), categories=list("abc")),
},
index=list("xyz"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"unstack_idx, expected_values, expected_index, expected_columns",
[
(
("A", "a"),
[[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
MultiIndex.from_tuples(
[(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
),
MultiIndex.from_tuples(
[("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
names=[None, ("A", "a")],
),
),
(
(("A", "a"), "B"),
[[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
Index([3, 4], name="C"),
MultiIndex.from_tuples(
[
("d", "a", 1),
("d", "a", 2),
("d", "b", 1),
("d", "b", 2),
("e", "a", 1),
("e", "a", 2),
("e", "b", 1),
("e", "b", 2),
],
names=[None, ("A", "a"), "B"],
),
),
],
)
def test_unstack_mixed_type_name_in_multiindex(
self, unstack_idx, expected_values, expected_index, expected_columns
):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
)
df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)
result = df.unstack(unstack_idx)
expected = DataFrame(
expected_values, columns=expected_columns, index=expected_index
)
tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.rand(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442], dtype="int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
tm.assert_frame_equal(df.stack(level=[1, 2]), df.stack(level=1).stack(level=1))
tm.assert_frame_equal(
df.stack(level=[-2, -1]), df.stack(level=1).stack(level=1)
)
df_named = df.copy()
return_value = df_named.columns.set_names(range(3), inplace=True)
assert return_value is None
tm.assert_frame_equal(
df_named.stack(level=[1, 2]), df_named.stack(level=1).stack(level=1)
)
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ["exp", "animal", 1]
tm.assert_frame_equal(
df2.stack(level=["animal", 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=["exp", 1]), exp_hair_stacked, check_names=False
)
# When mixed types are passed and the ints are not level
# names, raise
msg = (
"level should contain all level names or all level numbers, not "
"a mixture of the two"
)
with pytest.raises(ValueError, match=msg):
df2.stack(level=["animal", 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ["exp", "animal", 0]
tm.assert_frame_equal(
df3.stack(level=["animal", 0]), animal_hair_stacked, check_names=False
)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[
("A", "cat", "long"),
("B", "cat", "long"),
("A", "dog", "short"),
("B", "dog", "short"),
],
names=["exp", "animal", "hair_length"],
)
df = DataFrame(np.random.randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=["exp", "animal"])
animal_hair_stacked = df.stack(level=["animal", "hair_length"])
exp_hair_stacked = df.stack(level=["exp", "hair_length"])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
tm.assert_frame_equal(
df2.stack(level=[1, 2]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 1]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df2.stack(level=[0, 2]), exp_hair_stacked, check_names=False
)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
tm.assert_frame_equal(
df3.stack(level=[0, 1]), animal_hair_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 0]), exp_animal_stacked, check_names=False
)
tm.assert_frame_equal(
df3.stack(level=[2, 1]), exp_hair_stacked, check_names=False
)
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=["first", "second", "third"],
)
s = Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = MultiIndex(
levels=[["foo", "bar"], ["one", "two"]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=["first", "second"],
)
expected = DataFrame(
np.array(
[[np.nan, 0], [0, np.nan], [np.nan, 0], [0, np.nan]], dtype=np.float64
),
index=expected_mi,
columns=Index(["a", "b"], name="third"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
# check reversibility
data = float_frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
tm.assert_frame_equal(undo, float_frame)
# check NA handling
data = DataFrame({"x": [1, 2, np.NaN], "y": [3.0, 4, np.NaN]})
data.index = Index(["a", "b", "c"])
result = data.unstack()
midx = MultiIndex(
levels=[["x", "y"], ["a", "b", "c"]],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
)
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
tm.assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
df = DataFrame(rows, columns=list("ABCD"))
result = df.dtypes
expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
tm.assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(["A", "B"])
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("int64")] * 4,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# mixed
df2 = df.set_index(["A", "B"])
df2["C"] = 3.0
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
df2["D"] = "foo"
df3 = df2.unstack("B")
result = df3.dtypes
expected = Series(
[np.dtype("float64")] * 2 + [np.dtype("object")] * 2,
index=MultiIndex.from_arrays(
[["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
),
)
tm.assert_series_equal(result, expected)
# GH7405
for c, d in (
(np.zeros(5), np.zeros(5)),
(np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
):
df = DataFrame(
{
"A": ["a"] * 5,
"C": c,
"D": d,
"B": date_range("2012-01-01", periods=5),
}
)
right = df.iloc[:3].copy(deep=True)
df = df.set_index(["A", "B"])
df["D"] = df["D"].astype("int64")
left = df.iloc[:3].unstack(0)
right = right.set_index(["A", "B"]).unstack(0)
right[("D", "a")] = right[("D", "a")].astype("int64")
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_non_unique_index_names(self):
idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
df = DataFrame([1, 2], index=idx)
msg = "The name c1 occurs multiple times, use a level number"
with pytest.raises(ValueError, match=msg):
df.unstack("c1")
with pytest.raises(ValueError, match=msg):
df.T.stack("c1")
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([[0, 1], ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# With mixed dtype and NaN
levels = [["a", 2, "c"], [1, 3, 5, 7]]
codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = MultiIndex(levels, codes)
data = np.arange(8)
df = DataFrame(data.reshape(4, 2), index=idx)
cases = (
(0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
)
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = MultiIndex.from_product([[0, 1], col_level])
expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
# PH 32624: Error when using a lot of indices to unstack.
# The error occurred only, if a lot of indices are used.
df = DataFrame(
[[1]],
columns=MultiIndex.from_tuples([[0]], names=["c1"]),
index=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
)
result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
expected = DataFrame(
[[1]],
columns=MultiIndex.from_tuples(
[[0, 0, 1, 0, 0, 0, 1]],
names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
),
index=Index([0], name="i1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# PH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
# PH 28306: Unstack df with multi level cols and rows
df = DataFrame(
[[1, 2], [3, 4], [-1, -2], [-3, -4]],
columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
index=MultiIndex.from_tuples(
[
["m1", "P3", 222],
["m1", "A5", 111],
["m2", "P3", 222],
["m2", "A5", 111],
],
names=["i1", "i2", "i3"],
),
)
result = df.unstack(["i3", "i2"])
expected = df.unstack(["i3"]).unstack(["i2"])
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index1(self):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split("."))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(map(cast, right))
assert left == right
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf["jolie"])
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
for idx in itertools.permutations(["1st", "2nd", "3rd"]):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ["4th", "5th"]:
verify(udf[col])
def test_unstack_nan_index2(self):
# GH7403
df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [
[3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
]
vals = list(map(list, zip(*vals)))
idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack(0)
vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
cols = MultiIndex(
levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
)
idx = Index([np.nan, 0, 1, 2, 3], name="B")
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self, using_array_manager):
# GH7401
df = DataFrame(
{
"A": list("aaaaabbbbb"),
"B": (date_range("2012-01-01", periods=5).tolist() * 2),
"C": np.arange(10),
}
)
df.iloc[3, 1] = np.NaN
left = df.set_index(["A", "B"]).unstack()
vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
idx = Index(["a", "b"], name="A")
cols = MultiIndex(
levels=[["C"], date_range("2012-01-01", periods=5)],
codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, "B"],
)
right = DataFrame(vals, columns=cols, index=idx)
if using_array_manager:
# INFO(ArrayManager) with ArrayManager preserve dtype where possible
cols = right.columns[[1, 2, 3, 5]]
right[cols] = right[cols].astype(df["C"].dtype)
tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
# GH4862
vals = [
["Hg", np.nan, np.nan, 680585148],
["U", 0.0, np.nan, 680585148],
["Pb", 7.07e-06, np.nan, 680585148],
["Sn", 2.3614e-05, 0.0133, 680607017],
["Ag", 0.0, 0.0133, 680607017],
["Hg", -0.00015, 0.0133, 680607017],
]
df = DataFrame(
vals,
columns=["agent", "change", "dosage", "s_id"],
index=[17263, 17264, 17265, 17266, 17267, 17268],
)
left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
vals = [
[np.nan, np.nan, 7.07e-06, np.nan, 0.0],
[0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
]
idx = MultiIndex(
levels=[[680585148, 680607017], [0.0133]],
codes=[[0, 1], [-1, 0]],
names=["s_id", "dosage"],
)
cols = MultiIndex(
levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, "agent"],
)
right = DataFrame(vals, columns=cols, index=idx)
tm.assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
tm.assert_frame_equal(left.unstack(), right)
@td.skip_array_manager_not_yet_implemented # TODO(ArrayManager) MultiIndex bug
def test_unstack_nan_index5(self):
# GH9497 - multiple unstack with nulls
df = DataFrame(
{
"1st": [1, 2, 1, 2, 1, 2],
"2nd": date_range("2014-02-01", periods=6, freq="D"),
"jim": 100 + np.arange(6),
"joe": (np.random.randn(6) * 10).round(2),
}
)
df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
assert left.notna().values.sum() == 2 * len(df)
for col in ["jim", "joe"]:
for _, r in df.iterrows():
key = r["1st"], (col, r["2nd"], r["3rd"])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ("B",)])
ecols = MultiIndex.from_tuples([(t, "A")])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
tm.assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(
np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
columns=multiindex,
)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(
df.columns.to_numpy(), names=df.columns.names
)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples(
[("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
names=["Upper", "Lower"],
)
for multiindex_columns in (
[0, 1, 2, 3, 4],
[0, 1, 2, 3],
[0, 1, 2, 4],
[0, 1, 2],
[1, 2, 3],
[2, 3, 4],
[0, 1],
[0, 2],
[0, 3],
[0],
[2],
[4],
):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame(
[[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
index=MultiIndex(
levels=[[0, 1], ["u", "x", "y", "z"]],
codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
names=[None, "Lower"],
),
columns=Index(["B", "C"], name="Upper"),
dtype=df.dtypes[0],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize("labels", [list("yxz"), list("yxy")])
def test_stack_preserve_categorical_dtype(self, ordered, labels):
# GH13854
cidx = pd.CategoricalIndex(labels, categories=list("xyz"), ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
# `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
"labels,data",
[
(list("xyz"), [10, 11, 12, 13, 14, 15]),
(list("zyx"), [14, 15, 12, 13, 10, 11]),
],
)
def test_stack_multi_preserve_categorical_dtype(self, ordered, labels, data):
# GH-36991
cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
midx = MultiIndex.from_product([cidx, cidx2])
df = DataFrame([sorted(data)], columns=midx)
result = df.stack([0, 1])
s_cidx = pd.CategoricalIndex(sorted(labels), ordered=ordered)
expected = Series(data, index=MultiIndex.from_product([[0], s_cidx, cidx2]))
tm.assert_series_equal(result, expected)
def test_stack_preserve_categorical_dtype_values(self):
# GH-23077
cat = pd.Categorical(["a", "a", "b", "c"])
df = DataFrame({"A": cat, "B": cat})
result = df.stack()
index = MultiIndex.from_product([[0, 1, 2, 3], ["A", "B"]])
expected = Series(
pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index, columns",
[
([0, 0, 1, 1], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 0, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
([0, 1, 2, 3], MultiIndex.from_product([[1, 2], ["a", "b"]])),
],
)
def test_stack_multi_columns_non_unique_index(self, index, columns):
# GH-28301
df = DataFrame(index=index, columns=columns).fillna(1)
stacked = df.stack()
new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
expected = DataFrame(
stacked.to_numpy(), index=new_index, columns=stacked.columns
)
tm.assert_frame_equal(stacked, expected)
stacked_codes = np.asarray(stacked.index.codes)
expected_codes = np.asarray(new_index.codes)
tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([[0], ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = Series(["a", "b", "c", "a"], dtype="object")
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame(
{"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value="d")
expected = DataFrame(
{"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz")
)
tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
# GH 18338
df = DataFrame(
{
"timestamp": [pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")],
"a": ["a"],
"b": ["b"],
"c": ["c"],
},
columns=["timestamp", "a", "b", "c"],
)
result = df.set_index(["a", "b"]).unstack()
expected = DataFrame(
[[pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC"), "c"]],
index=Index(["a"], name="a"),
columns=MultiIndex(
levels=[["timestamp", "c"], ["b"]],
codes=[[0, 1], [0, 0]],
names=[None, "b"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_timezone_aware_values():
# GH 19420
ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
df = DataFrame({"A": ts}, index=["a", "b", "c"])
result = df.stack()
expected = Series(
ts,
index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
def test_stack_empty_frame(dropna):
# GH 36113
expected = Series(index=MultiIndex([[], []], [[], []]), dtype=np.float64)
result = DataFrame(dtype=np.float64).stack(dropna=dropna)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value):
# GH 36113
result = (
DataFrame(dtype=np.int64).stack(dropna=dropna).unstack(fill_value=fill_value)
)
expected = DataFrame(dtype=np.int64)
tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
# GH 36113
msg = r"index must be a MultiIndex to unstack.*"
with pytest.raises(ValueError, match=msg):
Series(dtype=np.int64).unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
def test_stack_positional_level_duplicate_column_names():
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex.from_tuples([(0, "x"), (0, "y")], names=[None, "a"])
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
class TestStackUnstackMultiLevel:
def test_unstack(self, multiindex_year_month_day_dataframe_random_data):
# just check that it works for now
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack()
unstacked.unstack()
# test that ints work
ymd.astype(int).unstack()
# test that int32 work
ymd.astype(np.int32).unstack()
@pytest.mark.parametrize(
"result_rows,result_columns,index_product,expected_row",
[
(
[[1, 1, None, None, 30.0, None], [2, 2, None, None, 30.0, None]],
["ix1", "ix2", "col1", "col2", "col3", "col4"],
2,
[None, None, 30.0, None],
),
(
[[1, 1, None, None, 30.0], [2, 2, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
2,
[None, None, 30.0],
),
(
[[1, 1, None, None, 30.0], [2, None, None, None, 30.0]],
["ix1", "ix2", "col1", "col2", "col3"],
None,
[None, None, 30.0],
),
],
)
def test_unstack_partial(
self, result_rows, result_columns, index_product, expected_row
):
# check for regressions on this issue:
# https://github.com/pandas-dev/pandas/issues/19351
# make sure DataFrame.unstack() works when its run on a subset of the DataFrame
# and the Index levels contain values that are not present in the subset
result = DataFrame(result_rows, columns=result_columns).set_index(
["ix1", "ix2"]
)
result = result.iloc[1:2].unstack("ix2")
expected = DataFrame(
[expected_row],
columns=MultiIndex.from_product(
[result_columns[2:], [index_product]], names=[None, "ix2"]
),
index=Index([2], name="ix1"),
)
tm.assert_frame_equal(result, expected)
def test_unstack_multiple_no_empty_columns(self):
index = MultiIndex.from_tuples(
[(0, "foo", 0), (0, "bar", 0), (1, "baz", 1), (1, "qux", 1)]
)
s = Series(np.random.randn(4), index=index)
unstacked = s.unstack([1, 2])
expected = unstacked.dropna(axis=1, how="all")
tm.assert_frame_equal(unstacked, expected)
def test_stack(self, multiindex_year_month_day_dataframe_random_data):
ymd = multiindex_year_month_day_dataframe_random_data
# regular roundtrip
unstacked = ymd.unstack()
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
unlexsorted = ymd.sort_index(level=2)
unstacked = unlexsorted.unstack(2)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted[::-1]
unstacked = unlexsorted.unstack(1)
restacked = unstacked.stack().swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
unlexsorted = unlexsorted.swaplevel(0, 1)
unstacked = unlexsorted.unstack(0).swaplevel(0, 1, axis=1)
restacked = unstacked.stack(0).swaplevel(1, 2)
tm.assert_frame_equal(restacked.sort_index(level=0), ymd)
# columns unsorted
unstacked = ymd.unstack()
unstacked = unstacked.sort_index(axis=1, ascending=False)
restacked = unstacked.stack()
tm.assert_frame_equal(restacked, ymd)
# more than 2 levels in the columns
unstacked = ymd.unstack(1).unstack(1)
result = unstacked.stack(1)
expected = ymd.unstack()
tm.assert_frame_equal(result, expected)
result = unstacked.stack(2)
expected = ymd.unstack(1)
tm.assert_frame_equal(result, expected)
result = unstacked.stack(0)
expected = ymd.stack().unstack(1).unstack(1)
tm.assert_frame_equal(result, expected)
# not all levels present in each echelon
unstacked = ymd.unstack(2).loc[:, ::3]
stacked = unstacked.stack().stack()
ymd_stacked = ymd.stack()
tm.assert_series_equal(stacked, ymd_stacked.reindex(stacked.index))
# stack with negative number
        result = ymd.unstack(0).stack(-2)
        expected = ymd.unstack(0).stack(0)
        tm.assert_frame_equal(result, expected)
# GH10417
def check(left, right):
tm.assert_series_equal(left, right)
assert left.index.is_unique is False
li, ri = left.index, right.index
tm.assert_index_equal(li, ri)
df = DataFrame(
np.arange(12).reshape(4, 3),
index=list("abab"),
columns=["1st", "2nd", "3rd"],
)
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd", "3rd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile(np.arange(3), 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
df.columns = ["1st", "2nd", "1st"]
mi = MultiIndex(
levels=[["a", "b"], ["1st", "2nd"]],
codes=[np.tile(np.arange(2).repeat(3), 2), np.tile([0, 1, 0], 4)],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
tpls = ("a", 2), ("b", 1), ("a", 1), ("b", 2)
df.index = MultiIndex.from_tuples(tpls)
mi = MultiIndex(
levels=[["a", "b"], [1, 2], ["1st", "2nd"]],
codes=[
np.tile(np.arange(2).repeat(3), 2),
np.repeat([1, 0, 1], [3, 6, 3]),
np.tile([0, 1, 0], 4),
],
)
left, right = df.stack(), Series(np.arange(12), index=mi)
check(left, right)
def test_unstack_odd_failure(self):
data = """day,time,smoker,sum,len
Fri,Dinner,No,8.25,3.
Fri,Dinner,Yes,27.03,9
Fri,Lunch,No,3.0,1
Fri,Lunch,Yes,13.68,6
Sat,Dinner,No,139.63,45
Sat,Dinner,Yes,120.77,42
Sun,Dinner,No,180.57,57
Sun,Dinner,Yes,66.82,19
Thu,Dinner,No,3.0,1
Thu,Lunch,No,117.32,44
Thu,Lunch,Yes,51.51,17"""
df = pd.read_csv(StringIO(data)).set_index(["day", "time", "smoker"])
# it works, #2100
result = df.unstack(2)
recons = result.stack()
tm.assert_frame_equal(recons, df)
def test_stack_mixed_dtype(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
df = frame.T
df["foo", "four"] = "foo"
df = df.sort_index(level=1, axis=1)
stacked = df.stack()
result = df["foo"].stack().sort_index()
tm.assert_series_equal(stacked["foo"], result, check_names=False)
assert result.name is None
assert stacked["bar"].dtype == np.float_
def test_unstack_bug(self):
df = DataFrame(
{
"state": ["naive", "naive", "naive", "active", "active", "active"],
"exp": ["a", "b", "b", "b", "a", "a"],
"barcode": [1, 2, 3, 4, 1, 3],
"v": ["hi", "hi", "bye", "bye", "bye", "peace"],
"extra": np.arange(6.0),
}
)
result = df.groupby(["state", "exp", "barcode", "v"]).apply(len)
unstacked = result.unstack()
restacked = unstacked.stack()
tm.assert_series_equal(restacked, result.reindex(restacked.index).astype(float))
def test_stack_unstack_preserve_names(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack()
assert unstacked.index.name == "first"
assert unstacked.columns.names == ["exp", "second"]
restacked = unstacked.stack()
assert restacked.index.names == frame.index.names
@pytest.mark.parametrize("method", ["stack", "unstack"])
def test_stack_unstack_wrong_level_name(
self, method, multiindex_dataframe_random_data
):
# GH 18303 - wrong level name should raise
frame = multiindex_dataframe_random_data
# A DataFrame with flat axes:
df = frame.loc["foo"]
with pytest.raises(KeyError, match="does not match index name"):
getattr(df, method)("mistake")
if method == "unstack":
# Same on a Series:
s = df.iloc[:, 0]
with pytest.raises(KeyError, match="does not match index name"):
getattr(s, method)("mistake")
def test_unstack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
result = frame.unstack("second")
expected = frame.unstack(level=1)
tm.assert_frame_equal(result, expected)
def test_stack_level_name(self, multiindex_dataframe_random_data):
frame = multiindex_dataframe_random_data
unstacked = frame.unstack("second")
result = unstacked.stack("exp")
expected = frame.unstack().stack(0)
tm.assert_frame_equal(result, expected)
result = frame.stack("exp")
expected = frame.stack()
tm.assert_series_equal(result, expected)
def test_stack_unstack_multiple(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
unstacked = ymd.unstack(["year", "month"])
expected = ymd.unstack("year").unstack("month")
tm.assert_frame_equal(unstacked, expected)
assert unstacked.columns.names == expected.columns.names
# series
s = ymd["A"]
s_unstacked = s.unstack(["year", "month"])
        tm.assert_frame_equal(s_unstacked, expected["A"])
from __future__ import annotations
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block_2d,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
from pandas.core.internals.blocks import Block
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
axis1_made_copy = False
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
if ax == 1 and indexer is not None:
axis1_made_copy = True
if copy and concat_axis == 0 and not axis1_made_copy:
# for concat_axis 1 we will always get a copy through concat_arrays
mgr = mgr.copy()
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
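# Behaviour sketch for concat_arrays (pandas-internal API, shown for orientation only):
#   concat_arrays([np.array([1, 2], dtype="int64"), np.array([True, False])])
# keeps an integer result via np.find_common_type, while any NullArrayProxy entry is
# first materialised with .to_array(target_dtype), so all-NA placeholders never decide
# the common dtype on their own.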
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
# Assertions disabled for performance
# for tup in mgrs_indexers:
# # caller is responsible for ensuring this
# indexers = tup[1]
# assert concat_axis not in indexers
if concat_axis == 0:
return _concat_managers_axis0(mgrs_indexers, axes, copy)
mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
# Assertion disabled for performance
# assert all(not x[1] for x in mgrs_indexers)
concat_plans = [_get_mgr_concatenation_plan(mgr) for mgr, _ in mgrs_indexers]
concat_plan = _combine_concat_plans(concat_plans)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
# Assertion disabled for performance
# assert len(join_units) == len(mgrs_indexers)
if len(join_units) == 1:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, ndim=2)
values = ensure_wrapped_if_datetimelike(values)
fastpath = blk.values.dtype == values.dtype
else:
values = _concatenate_join_units(join_units, copy=copy)
fastpath = False
if fastpath:
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block_2d(values, placement=placement)
blocks.append(b)
return BlockManager(tuple(blocks), axes)
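# Orientation note: this is the internal entry point pandas' concat/merge code calls once
# the per-axis reindexers are known; each input arrives as a (BlockManager, {axis: indexer})
# tuple, and ArrayManager-backed inputs are routed to _concatenate_array_managers above.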
def _concat_managers_axis0(
mgrs_indexers, axes: list[Index], copy: bool
) -> BlockManager:
"""
concat_managers specialized to concat_axis=0, with reindexing already
having been done in _maybe_reindex_columns_na_proxy.
"""
had_reindexers = {
i: len(mgrs_indexers[i][1]) > 0 for i in range(len(mgrs_indexers))
}
mgrs_indexers = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers)
mgrs = [x[0] for x in mgrs_indexers]
offset = 0
blocks = []
for i, mgr in enumerate(mgrs):
# If we already reindexed, then we definitely don't need another copy
made_copy = had_reindexers[i]
for blk in mgr.blocks:
if made_copy:
nb = blk.copy(deep=False)
elif copy:
nb = blk.copy()
else:
# by slicing instead of copy(deep=False), we get a new array
# object, see test_concat_copy
nb = blk.getitem_block(slice(None))
nb._mgr_locs = nb._mgr_locs.add(offset)
blocks.append(nb)
offset += len(mgr.items)
return BlockManager(tuple(blocks), axes)
def _maybe_reindex_columns_na_proxy(
axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]]
) -> list[tuple[BlockManager, dict[int, np.ndarray]]]:
"""
Reindex along columns so that all of the BlockManagers being concatenated
have matching columns.
Columns added in this reindexing have dtype=np.void, indicating they
should be ignored when choosing a column's final dtype.
"""
new_mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]] = []
for mgr, indexers in mgrs_indexers:
# For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
# is a cheap reindexing.
for i, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[i],
indexers[i],
axis=i,
copy=False,
only_slice=True, # only relevant for i==0
allow_dups=True,
use_na_proxy=True, # only relevant for i==0
)
new_mgrs_indexers.append((mgr, {}))
return new_mgrs_indexers
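# Example of the effect described above: if one frame has columns ['a', 'b'] and another
# only ['a'], the second manager gains a 'b' column backed by a dtype=np.void proxy, so
# that placeholder block is ignored when the final dtype for 'b' is chosen.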
def _get_mgr_concatenation_plan(mgr: BlockManager):
"""
Construct concatenation plan for given block manager.
Parameters
----------
mgr : BlockManager
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk))]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
# Assertions disabled for performance; these should always hold
# assert placements.is_slice_like
# assert blkno != -1
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
if not unit_no_ax0_reindexing:
# create block from subset of columns
# Note: Blocks with only 1 column will always have unit_no_ax0_reindexing,
# so we will never get here with ExtensionBlock.
blk = blk.getitem_block(ax0_blk_indexer)
# Assertions disabled for performance
# assert blk._mgr_locs.as_slice == placements.as_slice
unit = JoinUnit(blk)
plan.append((placements, unit))
return plan
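# Sketch of a plan: for a manager with a float64 block at columns [0, 2] and an int64
# block at column [1], the plan is a list of (BlockPlacement, JoinUnit) pairs, one per
# contiguous run of columns coming from the same block, in ascending column order.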
class JoinUnit:
def __init__(self, block: Block) -> None:
self.block = block
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)})"
@cache_readonly
def is_na(self) -> bool:
blk = self.block
if blk.dtype.kind == "V":
return True
return False
def get_reindexed_values(self, empty_dtype: DtypeObj) -> ArrayLike:
if self.is_na:
return make_na_array(empty_dtype, self.block.shape)
else:
return self.block.values
def make_na_array(dtype: DtypeObj, shape: Shape) -> ArrayLike:
"""
Construct an np.ndarray or ExtensionArray of the given dtype and shape
holding all-NA values.
"""
if is_datetime64tz_dtype(dtype):
# NaT here is analogous to dtype.na_value below
i8values = np.full(shape, NaT.value)
return DatetimeArray(i8values, dtype=dtype)
elif is_1d_only_ea_dtype(dtype):
dtype = cast(ExtensionDtype, dtype)
cls = dtype.construct_array_type()
missing_arr = cls._from_sequence([], dtype=dtype)
nrows = shape[-1]
taker = -1 * np.ones((nrows,), dtype=np.intp)
return missing_arr.take(taker, allow_fill=True, fill_value=dtype.na_value)
elif isinstance(dtype, ExtensionDtype):
# TODO: no tests get here, a handful would if we disabled
# the dt64tz special-case above (which is faster)
cls = dtype.construct_array_type()
missing_arr = cls._empty(shape=shape, dtype=dtype)
missing_arr[:] = dtype.na_value
return missing_arr
else:
# NB: we should never get here with dtype integer or bool;
# if we did, the missing_arr.fill would cast to gibberish
missing_arr = np.empty(shape, dtype=dtype)
fill_value = _dtype_to_na_value(dtype)
missing_arr.fill(fill_value)
return missing_arr
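# For example, make_na_array(np.dtype("float64"), (2, 3)) gives a 2x3 ndarray of NaN,
# while an extension dtype such as pd.Int64Dtype() yields an IntegerArray of pd.NA via
# the take()/fill_value path above.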
def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
"""
Concatenate values from several join units along axis=1.
"""
empty_dtype = _get_empty_dtype(join_units)
to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype) for ju in join_units]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
elif any(is_1d_only_ea_dtype(t.dtype) for t in to_concat):
# TODO(EA2D): special case not needed if all EAs used HybridBlocks
# NB: we are still assuming here that Hybrid blocks have shape (1, N)
# concatting with at least one EA means we are concatting a single column
# the non-EA values are 2D arrays with shape (1, n)
# error: No overload variant of "__getitem__" of "ExtensionArray" matches
# argument type "Tuple[int, slice]"
to_concat = [
t
if is_1d_only_ea_dtype(t.dtype)
else t[0, :] # type: ignore[call-overload]
for t in to_concat
]
concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
concat_values = ensure_block_shape(concat_values, 2)
else:
concat_values = concat_compat(to_concat, axis=1)
return concat_values
def _dtype_to_na_value(dtype: DtypeObj):
"""
Find the NA value to go with this dtype.
"""
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif dtype.kind in ["m", "M"]:
return dtype.type("NaT")
elif dtype.kind in ["f", "c"]:
return dtype.type("NaN")
elif dtype.kind == "b":
# different from missing.na_value_for_dtype
return None
elif dtype.kind in ["i", "u"]:
return np.nan
elif dtype.kind == "O":
return np.nan
raise NotImplementedError
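# In short: datetime/timedelta dtypes map to NaT, float/complex to NaN, integer and
# object fall back to np.nan (forcing an upcast when filled), and bool deliberately
# maps to None rather than the usual missing-value marker.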
def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
"""
if len(join_units) == 1:
blk = join_units[0].block
return blk.dtype
if _is_uniform_reindex(join_units):
empty_dtype = join_units[0].block.dtype
return empty_dtype
needs_can_hold_na = any(unit.is_na for unit in join_units)
dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
    dtype = find_common_type(dtypes)
    if needs_can_hold_na:
        dtype = ensure_dtype_can_hold_na(dtype)
    return dtype
#!/usr/bin/env python
# coding: utf-8
# In[1]:
def recommendcase(location,quality,compensate,dbcon,number):
"""
location: np.array
quality: np.array
compensate: np.array
dbcon: database connection
number: number of recommended cases
"""
import pandas as pd
caseset = pd.DataFrame(columns= ('caseid','num'))
if location.shape[0]:
for i in range(location.shape[0]):
#match keywords
sql1 = """SELECT caseid ,summary , 1 as num FROM product_dispute.summary_case WHERE summary like '%""" + location[i] + """%'"""
#append dataframe
            caseset = caseset.append(pd.read_sql_query(sql1, dbcon))
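    # Hypothetical call shape (connection object and keyword values are placeholders):
    #   recommendcase(np.array(["keyword"]), np.array([]), np.array([]), dbcon=conn, number=5)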
import numpy as np
import pandas as pd
import pyarrow as pa
import fletcher as fr
class ArithmeticOps:
def setup(self):
data = np.random.randint(0, 2 ** 20, size=2 ** 24)
        self.pd_int = pd.Series(data)
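    # asv-style benchmarks would add time_*/peakmem_* methods using the objects built in
    # setup(); a minimal hypothetical example:
    # def time_add_scalar(self):
    #     self.pd_int + 1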
import math
from functools import partial
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn.neighbors import LocalOutlierFactor
from statsmodels.stats.weightstats import DescrStatsW
from tqdm import tqdm
from tqdm.auto import tqdm
from utils import read_images, ParallelCalls, images_animation
class FileAnalysis:
def __init__(self):
self.alpha_dt_range = 20, 100
self.max_mean_jump_xy = 100.
self.max_mean_jump_xyz = 100.
self.min_length = 50
self.t_weight = 10.
self.min_pairs_count = 50
self.smooth = .99
self.num_frames = None
self.intensity_quantile = .5
def get_analysis_for_file(self, file, pixel_size, **analysis):
tq = tqdm(leave=False, total=5, disable=False)
def step(name):
tq.set_description(name)
tq.update()
if 'msds_df' not in analysis:
step('get_localizations')
locs_df = get_localizations(file=file, num_frames=self.num_frames)['localizations']
step('get_registration')
get_registration_result = get_registration(file=file, num_frames=self.num_frames, smooth=self.smooth,
warped_image=False)
homography = get_registration_result['homography']
original_image = get_registration_result['extra_info']['original_image']
step('get_warp_localizations')
locs_warped_df = get_warp_localizations(
locs_df=locs_df,
homographies=homography,
pixel_size=pixel_size,
)
step('get_trajectories')
result = self.get_and_filter_trajectories(locs_warped_df)
trajectories_df = result['trajectories_df']
trajectories_info_df = result['trajectories_info_df']
step('get_msds')
msds_df = self.get_and_filter_msds(trajectories_df)
analysis = dict(
analysis,
msds_df=msds_df,
locs_df=locs_df,
original_image=original_image,
homography=homography,
trajectories_df=trajectories_df,
trajectories_info_df=trajectories_info_df,
locs_warped_df=locs_warped_df,
)
step('get_alphas')
analysis.update(alphas_df=self.get_alphas(analysis['msds_df']))
return analysis
def get_and_filter_msds(self, trajectories_df):
msds_df = get_msds(trajectories_df)['msds_df']
msds_df = self.get_filter_msds(msds_df)
return msds_df
def get_and_filter_trajectories(self, locs_df):
df = locs_df
if self.intensity_quantile:
min_intensity = np.quantile(df['I'], self.intensity_quantile)
df = df[df['I'] >= min_intensity]
result = get_cluster_trajectories(df, t_weight=self.t_weight)
trajectories_df = result['trajectories_df']
trajectories_info_df = get_trajectories_info(trajectories_df)
return dict(
trajectories_df=self.get_filter_trajectories(trajectories_df, trajectories_info_df),
trajectories_info_df=trajectories_info_df
)
def get_filter_trajectories(self, trajectories_df, trajectories_info_df):
def func(mean_jump_xy, mean_jump_xyz, frame_range, length):
return (
(self.max_mean_jump_xy is not None and mean_jump_xy <= self.max_mean_jump_xy)
and (self.min_length is not None and length >= self.min_length)
and (self.max_mean_jump_xyz is not None and mean_jump_xyz <= self.max_mean_jump_xyz)
)
keep = trajectories_info_df.apply(lambda _: func(**_), axis=1)
trajectories_df = trajectories_df.groupby('traj').filter(lambda _: keep[_.name])
return trajectories_df
def get_filter_msds(self, msds_df):
df = msds_df
if self.min_pairs_count:
df = df[df['pairs_count'] >= self.min_pairs_count]
return df
def get_alphas(self, msds_df):
def func(df):
if len(df) < 2:
return
df = df.reset_index()
df = df[df['dt'] > 0]
df_fit = df[df['dt'].between(*self.alpha_dt_range)]
x, y = df_fit['dt'].values, df_fit['msd'].values
p = np.polyfit(np.log(x), np.log(y), 1)
x_pred = df['dt'].values
y_pred = np.exp(np.polyval(p, np.log(x_pred)))
alpha, intercept = p
return pd.Series(dict(alpha=alpha, intercept=intercept, x_fit=x, y_fit=y, x_pred=x_pred, y_pred=y_pred))
alphas_df = msds_df.groupby('traj').apply(func)
alphas_df.attrs = dict(dt_range=self.alpha_dt_range)
return alphas_df
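# Minimal sketch of the log-log fit performed in get_alphas above (synthetic
# numbers, illustrative only): fitting log(msd) = alpha*log(dt) + intercept
# recovers the diffusion exponent alpha; the helper below is never called.
def _example_alpha_fit():
    dt = np.arange(20, 101, dtype=float)      # lag times inside alpha_dt_range
    msd = 0.5 * dt ** 0.8                     # synthetic sub-diffusive MSD curve
    alpha, intercept = np.polyfit(np.log(dt), np.log(msd), 1)
    return alpha, intercept                   # alpha ~ 0.8, intercept ~ log(0.5)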
def get_localizations(file, num_frames):
csv_df: pd.DataFrame = | pd.read_csv(file) | pandas.read_csv |
import argparse
from sklearn.metrics import roc_curve, auc
import tensorflow as tf
from tensorflow.python.ops.check_ops import assert_greater_equal_v2
import load_data
from tqdm import tqdm
import numpy as np
import pandas as pd
from math import e as e_VALUE
import tensorflow.keras.backend as Keras_backend
from sklearn.ensemble import RandomForestClassifier
from scipy.special import bdtrc
def func_CallBacks(Dir_Save=''):
mode = 'min'
monitor = 'val_loss'
# checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath= Dir_Save + '/best_model_weights.h5', monitor=monitor , verbose=1, save_best_only=True, mode=mode)
# Reduce_LR = tf.keras.callbacks.ReduceLROnPlateau(monitor=monitor, factor=0.1, min_delta=0.005 , patience=10, verbose=1, save_best_only=True, mode=mode , min_lr=0.9e-5 , )
# CSVLogger = tf.keras.callbacks.CSVLogger(Dir_Save + '/results.csv', separator=',', append=False)
EarlyStopping = tf.keras.callbacks.EarlyStopping( monitor = monitor,
min_delta = 0,
patience = 4,
verbose = 1,
mode = mode,
baseline = 0,
restore_best_weights = True)
return [EarlyStopping] # [checkpointer , EarlyStopping , CSVLogger]
def reading_terminal_inputs():
parser = argparse.ArgumentParser()
parser.add_argument("--epoch" , help="number of epochs")
parser.add_argument("--bsize" , help="batch size")
parser.add_argument("--max_sample" , help="maximum number of training samples")
parser.add_argument("--naug" , help="number of augmentations")
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
parser.add_argument("--architecture_name", help='architecture name')
args = parser.parse_args()
epoch = int(args.epoch) if args.epoch else 3
number_augmentation = int(args.naug) if args.naug else 3
bsize = int(args.bsize) if args.bsize else 100
max_sample = int(args.max_sample) if args.max_sample else 1000
architecture_name = str(args.architecture_name) if args.architecture_name else 'DenseNet121'
return epoch, bsize, max_sample, architecture_name, number_augmentation
def mlflow_settings():
"""
RUN UI with postgres and HPC:
REMOTE postgres server:
# connecting to remote server through ssh tunneling
ssh -L 5000:localhost:5432 <EMAIL>
# using the mapped port and localhost to view the data
mlflow ui --backend-store-uri postgresql://artinmajdi:1234@localhost:5000/chest_db --port 6789
RUN directly from GitHub or show experiments/runs list:
export MLFLOW_TRACKING_URI=http://127.0.0.1:5000
mlflow runs list --experiment-id <id>
mlflow run --no-conda --experiment-id 5 -P epoch=2 https://github.com/artinmajdi/mlflow_workflow.git -v main
mlflow run mlflow_workflow --no-conda --experiment-id 5 -P epoch=2
PostgreSQL server style
server = f'{dialect_driver}://{username}:{password}@{ip}/{database_name}' """
postgres_connection_type = { 'direct': ('5432', 'data7-db1.cyverse.org'),
'ssh-tunnel': ('5000', 'localhost')
}
port, host = postgres_connection_type['ssh-tunnel'] # 'direct' , 'ssh-tunnel'
username = "artinmajdi"
password = '<PASSWORD>'
database_name = "chest_db_v2"
dialect_driver = 'postgresql'
server = f'{dialect_driver}://{username}:{password}@{host}:{port}/{database_name}'
Artifacts = { 'hpc': 'sftp://mohammadsmajdi@file<EMAIL>iz<EMAIL>.<EMAIL>:/home/u29/mohammadsmajdi/projects/mlflow/artifact_store',
'data7_db1': 'sftp://[email protected]:/home/artinmajdi/mlflow_data/artifact_store'} # :temp2_data7_b
return server, Artifacts['data7_db1']
def architecture(architecture_name: str='DenseNet121', input_shape: list=[224,224,3], num_classes: int=14):
input_tensor=tf.keras.layers.Input(input_shape)
if architecture_name == 'custom':
model = tf.keras.layers.Conv2D(4, kernel_size=(3,3), activation='relu')(input_tensor)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(8, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Conv2D(16, kernel_size=(3,3), activation='relu')(model)
model = tf.keras.layers.BatchNormalization()(model)
model = tf.keras.layers.MaxPooling2D(2,2)(model)
model = tf.keras.layers.Flatten()(model)
model = tf.keras.layers.Dense(32, activation='relu')(model)
model = tf.keras.layers.Dense(num_classes , activation='softmax')(model)
return tf.keras.models.Model(inputs=input_tensor, outputs=[model]) # 'model' here is an output tensor, so the graph input must be input_tensor
else:
""" Xception VG16 VGG19 DenseNet201
ResNet50 ResNet50V2 ResNet101 DenseNet169
ResNet101V2 ResNet152 ResNet152V2 DenseNet121
InceptionV3 InceptionResNetV2 MobileNet MobileNetV2
if keras_version > 2.4
EfficientNetB0 EfficientNetB1 EfficientNetB2 EfficientNetB3
EfficientNetB4 EfficientNetB5 EfficientNetB6 EfficientNetB7 """
pooling='avg'
weights='imagenet'
include_top=False
if architecture_name == 'xception': model_architecture = tf.keras.applications.Xception
elif architecture_name == 'VGG16': model_architecture = tf.keras.applications.VGG16
elif architecture_name == 'VGG19': model_architecture = tf.keras.applications.VGG19
elif architecture_name == 'ResNet50': model_architecture = tf.keras.applications.ResNet50
elif architecture_name == 'ResNet50V2': model_architecture = tf.keras.applications.ResNet50V2
elif architecture_name == 'ResNet101': model_architecture = tf.keras.applications.ResNet101
elif architecture_name == 'ResNet101V2': model_architecture = tf.keras.applications.ResNet101V2
elif architecture_name == 'ResNet152': model_architecture = tf.keras.applications.ResNet152
elif architecture_name == 'ResNet152V2': model_architecture = tf.keras.applications.ResNet152V2
elif architecture_name == 'InceptionV3': model_architecture = tf.keras.applications.InceptionV3
elif architecture_name == 'InceptionResNetV2': model_architecture = tf.keras.applications.InceptionResNetV2
elif architecture_name == 'MobileNet': model_architecture = tf.keras.applications.MobileNet
elif architecture_name == 'MobileNetV2': model_architecture = tf.keras.applications.MobileNetV2
elif architecture_name == 'DenseNet121': model_architecture = tf.keras.applications.DenseNet121
elif architecture_name == 'DenseNet169': model_architecture = tf.keras.applications.DenseNet169
elif architecture_name == 'DenseNet201': model_architecture = tf.keras.applications.DenseNet201
elif int(list(tf.keras.__version__)[2]) >= 4:
if architecture_name == 'EfficientNetB0': model_architecture = tf.keras.applications.EfficientNetB0
elif architecture_name == 'EfficientNetB1': model_architecture = tf.keras.applications.EfficientNetB1
elif architecture_name == 'EfficientNetB2': model_architecture = tf.keras.applications.EfficientNetB2
elif architecture_name == 'EfficientNetB3': model_architecture = tf.keras.applications.EfficientNetB3
elif architecture_name == 'EfficientNetB4': model_architecture = tf.keras.applications.EfficientNetB4
elif architecture_name == 'EfficientNetB5': model_architecture = tf.keras.applications.EfficientNetB5
elif architecture_name == 'EfficientNetB6': model_architecture = tf.keras.applications.EfficientNetB6
elif architecture_name == 'EfficientNetB7': model_architecture = tf.keras.applications.EfficientNetB7
model = model_architecture( weights = weights,
include_top = include_top,
input_tensor = input_tensor,
input_shape = input_shape,
pooling = pooling) # ,classes=num_classes
KK = tf.keras.layers.Dense( num_classes, activation='sigmoid', name='predictions' )(model.output)
return tf.keras.models.Model(inputs=model.input,outputs=KK)
def weighted_bce_loss(W):
def func_loss(y_true,y_pred):
NUM_CLASSES = y_pred.shape[1]
loss = 0
for d in range(NUM_CLASSES):
y_true = tf.cast(y_true, tf.float32)
mask = tf.keras.backend.cast( tf.keras.backend.not_equal(y_true[:,d], -5),
tf.keras.backend.floatx() )
loss += W[d]*tf.keras.losses.binary_crossentropy( y_true[:,d] * mask,
y_pred[:,d] * mask )
return tf.divide( loss, tf.cast(NUM_CLASSES,tf.float32) )
return func_loss
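# Hedged usage sketch for weighted_bce_loss (values made up, not from the
# training pipeline): a label of -5 marks a missing annotation and is masked
# out of the per-class binary cross-entropy before the class weights W apply.
def _example_weighted_bce():
    y_true = tf.constant([[1.0, -5.0], [0.0, 1.0]])
    y_pred = tf.constant([[0.9, 0.4], [0.2, 0.8]])
    loss_fn = weighted_bce_loss(W=[1.0, 1.0])
    return loss_fn(y_true, y_pred)            # scalar loss averaged over the classes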
def optimize(dir, train_dataset, valid_dataset, epochs, Info, architecture_name):
# architecture
model = architecture( architecture_name = architecture_name,
input_shape = list(Info.target_size) + [3] ,
num_classes = len(Info.pathologies) )
model.compile( optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
loss = weighted_bce_loss(Info.class_weights), # tf.keras.losses.binary_crossentropy
metrics = [tf.keras.metrics.binary_accuracy] )
# optimization
history = model.fit( train_dataset,
validation_data = valid_dataset,
epochs = epochs,
steps_per_epoch = Info.steps_per_epoch,
validation_steps = Info.validation_steps,
verbose = 1,
use_multiprocessing = True) # ,callbacks=func_CallBacks(dir + '/model')
# saving the optimized model
model.save( dir + '/model/model.h5',
overwrite = True,
include_optimizer = False )
return model
def evaluate(dir: str, dataset: str='chexpert', batch_size: int=1000, model=tf.keras.Model()):
# Loading the data
Data, Info = load_data.load_chest_xray( dir = dir,
dataset = dataset,
batch_size = batch_size,
mode = 'test' )
score = measure_loss_acc_on_test_data( generator = Data.generator['test'],
model = model,
pathologies = Info.pathologies )
return score
def measure_loss_acc_on_test_data(generator, model, pathologies):
# Looping over all test samples
score_values = {}
NUM_CLASSES = len(pathologies)
generator.reset()
for j in tqdm(range(len(generator.filenames))):
x_test, y_test = next(generator)
full_path, x,y = generator.filenames[j] , x_test[0,...] , y_test[0,...]
x,y = x[np.newaxis,:] , y[np.newaxis,:]
# Estimating the loss & accuracy for instance
eval = model.evaluate(x=x, y=y,verbose=0,return_dict=True)
# predicting the labels for instance
pred = model.predict(x=x,verbose=0)
# Measuring the loss for each class
loss_per_class = [ tf.keras.losses.binary_crossentropy(y[...,d],pred[...,d]) for d in range(NUM_CLASSES)]
# saving all the infos
score_values[full_path] = {'full_path':full_path,'loss_avg':eval['loss'], 'acc_avg':eval['binary_accuracy'], 'pred':pred[0], 'pred_binary':pred[0] > 0.5, 'truth':y[0]>0.5, 'loss':np.array(loss_per_class), 'pathologies':pathologies}
# converting the outputs into panda dataframe
df = pd.DataFrame.from_dict(score_values).T
# resetting the index to integers
df.reset_index(inplace=True)
# # dropping the old index column
df = df.drop(['index'],axis=1)
return df
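# Hedged sketch (illustrative): each row of the frame returned above carries the
# per-sample vectors ('pred', 'loss', 'truth', 'pathologies') that the
# hierarchy-aware re-weighting classes below consume via score.loc[subject_ix].
def _example_read_score_row(score_df):
    row = score_df.iloc[0]
    return row['pathologies'], row['loss'], row['pred'], row['truth']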
class Parent_Child():
def __init__(self, subj_info: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
"""
subject_info = {'pred':[], 'loss':[], 'pathologies':['Edema','Cardiomegaly',...]}
1. After creating a class:
SPC = Parent_Child(loss_dict, pred_dict, technique)
2. Update the parent child relationship:
SPC.set_parent_child_relationship(parent_name1, child_name_list1)
SPC.set_parent_child_relationship(parent_name2, child_name_list2)
3. Then update the loss and probabilities
SPC.update_loss_pred()
4. To read the updated loss and probabilities use:
loss_new_list = SPC.loss_dict_weighted or SPC.loss_new
pred_new_list = SPC.pred_dict_weighted or SPC.pred_new
IMPORTANT NOTE:
If there are more than 2 generations, it is absolutely important to enter the subjects in order of seniority:
gen1: grandparent (gen1)
gen1_subjx_children: parent (gen2)
gen2_subjx_children: child (gen3)
SPC = Parent_Child(loss_dict, pred_dict, technique)
SPC.set_parent_child_relationship(gen1_subj1, gen1_subj1_children)
SPC.set_parent_child_relationship(gen1_subj2, gen1_subj2_children)
. . .
SPC.set_parent_child_relationship(gen2_subj1, gen2_subj1_children)
SPC.set_parent_child_relationship(gen2_subj2, gen2_subj2_children)
. . .
SPC.update_loss_pred()
"""
self.subj_info = subj_info
self.technique = technique
self.all_parents: dict = {}
self.tuning_variables = tuning_variables
self.loss = subj_info.loss
self.pred = subj_info.pred
self.truth = subj_info.truth
self._convert_inputs_list_to_dict()
def _convert_inputs_list_to_dict(self):
self.loss_dict = {disease:self.subj_info.loss[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.pred_dict = {disease:self.subj_info.pred[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.truth_dict = {disease:self.subj_info.truth[index] for index,disease in enumerate(self.subj_info.pathologies)}
self.loss_dict_weighted = self.loss_dict
self.pred_dict_weighted = self.pred_dict
def set_parent_child_relationship(self, parent_name: str='parent_name', child_name_list: list=[]):
self.all_parents[parent_name] = child_name_list
def update_loss_pred(self):
"""
techniques:
1: coefficient = (1 + parent_loss)
2: coefficient = (2 * parent_pred)
3: coefficient = (2 * parent_pred)
1: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
2: loss_new = loss_old * coefficient if parent_pred < 0.5 else loss_old
3: loss_new = loss_old * coefficient
"""
for parent_name in self.all_parents:
self._update_loss_for_children(parent_name)
self._convert_outputs_to_list()
def _convert_outputs_to_list(self):
self.loss_new = np.array([self.loss_dict_weighted[disease] for disease in self.subj_info.pathologies])
self.pred_new = np.array([self.pred_dict_weighted[disease] for disease in self.subj_info.pathologies])
def _update_loss_for_children(self, parent_name: str='parent_name'):
parent_loss = self.loss_dict_weighted[parent_name]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
TV = self.tuning_variables[ self.technique ]
if TV['mode'] == 'truth': parent_truth_pred = parent_truth
elif TV['mode'] == 'pred': parent_truth_pred = parent_pred
else: parent_truth_pred = 1.0
if self.technique == 1: coefficient = TV['weight'] * parent_loss + TV['bias']
elif self.technique == 2: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
elif self.technique == 3: coefficient = TV['weight'] * parent_truth_pred + TV['bias']
for child_name in self.all_parents[parent_name]:
new_child_loss = self._measure_new_child_loss(coefficient, parent_name, child_name)
self.loss_dict_weighted[child_name] = new_child_loss
self.pred_dict_weighted[child_name] = 1 - np.power(e_VALUE , -new_child_loss)
self.pred_dict[child_name] = 1 - np.power(e_VALUE , -self.loss_dict[child_name])
def _measure_new_child_loss(self, coefficient: float=0.0, parent_name: str='parent_name', child_name: str='child_name'):
TV = self.tuning_variables[ self.technique ]
parent_pred = self.pred_dict_weighted[parent_name]
parent_truth = self.truth_dict[parent_name]
if TV['mode'] == 'truth': loss_activated = (parent_truth < 0.5 )
elif TV['mode'] == 'pred': loss_activated = (parent_pred < TV['parent_pred_threshold'] )
else: loss_activated = True
old_child_loss = self.loss_dict_weighted[child_name]
if self.technique == 1: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 2: new_child_loss = old_child_loss * coefficient if loss_activated else old_child_loss
elif self.technique == 3: new_child_loss = old_child_loss * coefficient
return new_child_loss
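# Hedged usage sketch of Parent_Child, following the numbered steps in its
# docstring (the subject row and tuning values are placeholders, not real data):
def _example_parent_child(subj_info, tuning_variables):
    spc = Parent_Child(subj_info=subj_info, technique=2, tuning_variables=tuning_variables)
    spc.set_parent_child_relationship('Lung Opacity', ['Edema', 'Consolidation'])
    spc.update_loss_pred()
    return spc.loss_new, spc.pred_new         # re-weighted per-class loss / probabilities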
class Measure_InterDependent_Loss_Aim1_1(Parent_Child):
def __init__(self,score: pd.DataFrame.dtypes={}, technique: int=0, tuning_variables: dict={}):
score['loss_new'] = score['loss']
score['pred_new'] = score['pred']
self.score = score
self.technique = technique
for subject_ix in tqdm(self.score.index):
Parent_Child.__init__(self, subj_info=self.score.loc[subject_ix], technique=technique, tuning_variables=tuning_variables)
self.set_parent_child_relationship(parent_name='Lung Opacity' , child_name_list=['Pneumonia', 'Atelectasis','Consolidation','Lung Lesion', 'Edema'])
self.set_parent_child_relationship(parent_name='Enlarged Cardiomediastinum', child_name_list=['Cardiomegaly'])
self.update_loss_pred()
self.score.loss_new.loc[subject_ix] = self.loss_new
self.score.pred_new.loc[subject_ix] = self.pred_new
def apply_new_loss_techniques_aim1_1(pathologies: list=[], score: pd.DataFrame.dtypes={}, tuning_variables: dict={}):
L = len(pathologies)
accuracies = np.zeros((4,L))
measured_auc = np.zeros((4,L))
FR = list(np.zeros(4))
for technique in range(4):
# extracting the output predictions
if technique == 0:
FR[technique] = score
output = score.pred
else:
FR[technique] = Measure_InterDependent_Loss_Aim1_1(score=score, technique=technique, tuning_variables=tuning_variables)
output = FR[technique].score.pred_new
# Measuring accuracy
func = lambda x1, x2: [ (x1[j] > 0.5) == (x2[j] > 0.5) for j in range(len(x1))]
pred_acc = score.truth.combine(output,func=func).to_list()
pred_acc = np.array(pred_acc).mean(axis=0)
prediction_table = np.stack(score.pred)
truth_table = np.stack(score.truth)
for d in range(prediction_table.shape[1]):
fpr, tpr, thresholds = roc_curve(truth_table[:,d], prediction_table[:,d], pos_label=1)
measured_auc[technique, d] = auc(fpr, tpr)
accuracies[technique,:] = np.floor( pred_acc*1000 ) / 10
class Outputs:
def __init__(self,accuracies, measured_auc, FR, pathologies):
self.accuracy = self._converting_to_dataframe(input_table=accuracies , columns=pathologies)
self.auc = self._converting_to_dataframe(input_table=measured_auc, columns=pathologies)
self.details = FR
self.pathologies = pathologies
def _converting_to_dataframe(self, input_table, columns):
df = pd.DataFrame(input_table, columns=columns)
df['technique'] = ['original','1','2','3']
df = df.set_index('technique').T
return df
return Outputs(accuracies=accuracies, measured_auc=measured_auc, FR=FR,pathologies=pathologies)
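# Hedged sketch of the tuning_variables layout consumed by the techniques above
# (keys inferred from _update_loss_for_children / _measure_new_child_loss; the
# numeric values are placeholders only):
_EXAMPLE_TUNING_VARIABLES = {
    1: dict(mode='pred', weight=1.0, bias=1.0, parent_pred_threshold=0.5),
    2: dict(mode='pred', weight=2.0, bias=0.0, parent_pred_threshold=0.5),
    3: dict(mode='truth', weight=2.0, bias=0.0, parent_pred_threshold=0.5),
}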
def apply_nan_back_to_truth(truth, how_to_treat_nans):
# changing the samples with an uncertain truth label to nan
truth[ truth == -10] = np.nan
# how to treat the nan labels in the original dataset before measuring the average accuracy
if how_to_treat_nans == 'ignore': truth[ truth == -5] = np.nan
elif how_to_treat_nans == 'pos': truth[ truth == -5] = 1
elif how_to_treat_nans == 'neg': truth[ truth == -5] = 0
return truth
def measure_mean_accruacy_chexpert(truth, prediction, how_to_treat_nans):
""" prediction & truth: num_samples x num_classes """
pred_classes = prediction > 0.5
# truth_nan_applied = self._truth_with_nan_applied()
truth_nan_applied = apply_nan_back_to_truth(truth=truth, how_to_treat_nans=how_to_treat_nans)
# measuring the binary truth labels (the nan samples will be fixed below)
truth_binary = truth_nan_applied > 0.5
truth_pred_compare = (pred_classes == truth_binary).astype(float)
# replacing the nan samples back to their nan value
truth_pred_compare[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average accuracy over all samples after ignoring the nan samples
accuracy = np.nanmean(truth_pred_compare, axis=0)*100
# safety measure, in case one class' overall accuracy is also nan; if removed, the integer formatting below would turn into very long floats
accuracy[np.isnan(accuracy)] = 0
accuracy = (accuracy*10).astype(int)/10
return accuracy
def measure_mean_uncertainty_chexpert(truth=np.array([]), uncertainty=np.array([]), how_to_treat_nans='ignore'):
""" uncertainty & truth: num_samples x num_classes """
# adding the nan values back to arrays
truth_nan_applied = apply_nan_back_to_truth(truth, how_to_treat_nans)
# replacing the nan samples back to their nan value
uncertainty[np.where(np.isnan(truth_nan_applied))] = np.nan
# measuring the average uncertainty over all samples after ignoring the nan samples
uncertainty_mean = np.nanmean(uncertainty , axis=0)
# safety measure, in case one class' overall uncertainty is also nan; if removed, the integer formatting below would turn into very long floats
uncertainty_mean[np.isnan(uncertainty_mean)] = 0
uncertainty_mean = (uncertainty_mean*1000).astype(int)/1000
return uncertainty_mean
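# Hedged numeric sketch for the accuracy helper above (synthetic arrays): with
# how_to_treat_nans='ignore', labels of -5 become NaN and are skipped by
# np.nanmean, so only annotated samples contribute to the per-class accuracy.
def _example_mean_accuracy():
    truth = np.array([[1.0, -5.0], [0.0, 1.0]])
    prediction = np.array([[0.9, 0.4], [0.4, 0.8]])
    return measure_mean_accruacy_chexpert(truth, prediction, how_to_treat_nans='ignore')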
class Measure_Accuracy_Aim1_2():
def __init__(self, predict_accuracy_mode: bool=False , model: tf.keras.models.Model.dtype='' , generator=tf.keras.preprocessing.image.ImageDataGenerator() , how_to_treat_nans: str='ignore', uncertainty_type: str='std'):
"""
how_to_treat_nans:
ignore: ignoring the nan samples when measuring the average accuracy
pos: missing labels are treated as positive
neg: missing labels are treated as negative """
self.predict_accuracy_mode = predict_accuracy_mode
self.how_to_treat_nans = how_to_treat_nans
self.generator = generator
self.model = model
self.uncertainty_type = uncertainty_type
self._setting_params()
def _setting_params(self):
self.full_data_length, self.num_classes = self.generator.labels.shape
self.batch_size = self.generator.batch_size
self.number_batches = int(np.ceil(self.full_data_length/self.batch_size))
self.truth = self.generator.labels.astype(float)
def loop_over_whole_dataset(self):
probs = np.zeros(self.generator.labels.shape)
# Looping over all batches
# Keras_backend.clear_session()
self.generator.reset()
np.random.seed(1)
for batch_index in tqdm(range(self.number_batches),disable=False):
# extracting the indexes for batch "batch_index"
self.generator.batch_index = batch_index
indexes = next(self.generator.index_generator)
# print(' extracting data -------')
self.generator.batch_index = batch_index
x, _ = next(self.generator)
# print(' predicting the labels -------')
probs[indexes,:] = self.model.predict(x,verbose=0)
# Measuring the accuracy over whole augmented dataset
if self.predict_accuracy_mode:
accuracy = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=probs.copy(), how_to_treat_nans=self.how_to_treat_nans)
return probs, accuracy
def loop_over_all_augmentations(self,number_augmentation: int=0):
self.number_augmentation = number_augmentation
self.probs_all_augs_3d = np.zeros((1 + number_augmentation , self.full_data_length , self.num_classes))
self.accuracy_all_augs_3d = np.zeros((1 + number_augmentation , self.num_classes))
# Looping over all augmentation scenarios
for ix_aug in range(number_augmentation):
print(f'augmentation {ix_aug}/{number_augmentation}')
probs, accuracy = self.loop_over_whole_dataset()
self.probs_all_augs_3d[ ix_aug,...] = probs
self.accuracy_all_augs_3d[ix_aug,...] = accuracy
# measuring the average probability over all augmented data
self.probs_avg_2d = np.mean( self.probs_all_augs_3d, axis=0)
if self.uncertainty_type == 'std':
self.probs_std_2d = np.std(self.probs_all_augs_3d, axis=0)
# Measuring the accuracy of the new estimated probability for each sample over all augmented data
# self.accuracy_final = self._measure_mean_accruacy(self.probs_avg_2d)
# self.uncertainty_final = self._measure_mean_std(self.probs_std_2d)
self.accuracy_final = measure_mean_accruacy_chexpert(truth=self.truth.copy(), prediction=self.probs_avg_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
self.uncertainty_final = measure_mean_uncertainty_chexpert(truth=self.truth.copy(), uncertainty=self.probs_std_2d.copy(), how_to_treat_nans=self.how_to_treat_nans)
def apply_technique_aim_1_2(how_to_treat_nans='ignore', data_generator='', data_generator_aug='', model='', number_augmentation=3, uncertainty_type='std'):
print('running the evaluation on original non-augmented data')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
probs_2d_orig, old_accuracy = MA.loop_over_whole_dataset()
print(' running the evaluation on augmented data including the uncertainty measurement')
MA = Measure_Accuracy_Aim1_2( predict_accuracy_mode = True,
generator = data_generator_aug,
model = model,
how_to_treat_nans = how_to_treat_nans,
uncertainty_type = uncertainty_type)
MA.loop_over_all_augmentations(number_augmentation=number_augmentation)
final_results = { 'old-accuracy': old_accuracy,
'new-accuracy': MA.accuracy_final,
'std' : MA.uncertainty_final}
return probs_2d_orig, final_results, MA
def estimate_maximum_and_change(all_accuracies=np.array([]), pathologies=[]):
columns = ['old-accuracy', 'new-accuracy', 'std']
# creating a dataframe from accuracies
df = pd.DataFrame(all_accuracies , index=pathologies)
# adding the 'maximum' & 'change' columns
df['maximum'] = df.columns[ df.values.argmax(axis=1) ]
df['change'] = df[columns[1:]].max(axis=1) - df[columns[0]]
# replacing "0" values to "--" for readability
df.maximum[df.change==0.0] = '--'
df.change[df.change==0.0] = '--'
return df
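# Hedged usage sketch for estimate_maximum_and_change (made-up numbers): the
# expected input mirrors the final_results dict built in apply_technique_aim_1_2,
# one array per column and one entry per pathology.
def _example_summary_table():
    final_results = {'old-accuracy': np.array([80.0, 91.0]),
                     'new-accuracy': np.array([82.5, 91.0]),
                     'std':          np.array([0.05, 0.02])}
    return estimate_maximum_and_change(all_accuracies=final_results,
                                       pathologies=['Edema', 'Cardiomegaly'])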
# def apply_technique_aim_1_2_with_dataframe(how_to_treat_nans='ignore', pathologies=[], data_generator='', data_generator_aug='', model='', uncertainty_type='std'):
# outputs, MA = apply_technique_aim_1_2(how_to_treat_nans=how_to_treat_nans, data_generator=data_generator, data_generator_aug=data_generator_aug, model=model, uncertainty_type=uncertainty_type)
# df = estimate_maximum_and_change(all_accuracies=outputs, pathologies=pathologies)
# return df, outputs, MA
""" crowdsourcing technique aim 1_3 """
def apply_technique_aim_1_3(data={}, num_simulations=20, feature_columns=[], ARLS={}):
def assigning_worker_true_labels(seed_num=1, true=[], labelers_strength=0.5):
# setting the random seed
# np.random.seed(seed_num)
# number of samples and labelers/workers
num_samples = true.shape[0]
# finding a random number for each instance
true_label_assignment_prob = np.random.random(num_samples)
# samples that will have an inaccurate true label
false_samples = true_label_assignment_prob < 1 - labelers_strength
# measuring the new labels for each labeler/worker
worker_true = true > 0.5
worker_true[ false_samples ] = ~ worker_true[ false_samples ]
return worker_true
def assigning_random_labelers_strengths(num_labelers=10, low_dis=0.3, high_dis=0.9):
labeler_names = [f'labeler_{j}' for j in range(num_labelers)]
# if num_labelers > 1:
# ls1 = np.random.uniform( low = 0.1,
# high = 0.3,
# size = int(num_labelers/2))
# ls2 = np.random.uniform( low = 0.7,
# high = 0.9,
# size = num_labelers - int(num_labelers/2))
# labelers_strength = np.concatenate((ls1 , ls2),axis=0)
# else:
labelers_strength = np.random.uniform( low = low_dis,
high = high_dis,
size = num_labelers)
return pd.DataFrame( {'labelers_strength': labelers_strength}, index = labeler_names)
# TODO repeat this for multiple seeds and average the results
np.random.seed(11)
# setting a random strength for each labeler/worker
labelers_strength = assigning_random_labelers_strengths( num_labelers = ARLS['num_labelers'],
low_dis = ARLS['low_dis'],
high_dis = ARLS['high_dis'])
predicted_labels_all_sims = {'train':{}, 'test':{}}
true_labels = {'train':pd.DataFrame(), 'test':pd.DataFrame()}
uncertainty = {'train':pd.DataFrame(), 'test': | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2013, ElasticRun and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from croniter import croniter
from datetime import datetime
import pandas as pd
from datetime import timedelta
import pytz
def execute(filters=None, as_df=False):
filters = filters or {}
run_date = filters.get('run_date', frappe.utils.nowdate())
status = filters.get('status')
hooks = frappe.get_hooks('scheduler_events')
events = hooks.get('daily') + hooks.get('daily_long')
all_jobs = []
for event in events:
all_jobs.append(['0 0 * * *', event])
for key, jobs in hooks.get('cron').items():
for row in jobs:
all_jobs.append([key, row])
now = frappe.utils.now_datetime()
filtered_jobs = []
ensure_run_for = frappe.get_all('Job Watchman', fields=['method', 'expected_run_time'])
ensure_run_for = {
row.method: row for row in ensure_run_for
}
'''
[
expected_status,
actual_status,
expected_start,
actual_start,
]
'''
method_names = set()
for cron_config, job in all_jobs:
if job not in ensure_run_for:
continue
method_names.add(job)
# job_line = frappe._dict()
# status_line = [''] * 4
status_line = frappe._dict()
prev_run = croniter(cron_config, now).get_prev(datetime)
status_line.method = job
if str(prev_run.date()) == str(now.date()):
status_line.expected_status = 'Scheduled'
status_line.expected_start = prev_run
status_line.expected_finish = prev_run + timedelta(minutes=ensure_run_for.get(job).expected_run_time)
else:
next_run = croniter(cron_config, now).get_next(datetime)
if str(next_run.date()) == str(now.date()):
status_line.expected_status = 'Will be scheduled'
else:
status_line.expected_status = 'Not Scheduled for Today'
filtered_jobs.append(status_line)
if not method_names:
return [], []
job_df = | pd.DataFrame.from_records(filtered_jobs, index=['method']) | pandas.DataFrame.from_records |
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from utils import *
import os
import argparse
from pathlib import Path
from collections import Counter
class kMeansClustering:
def __init__(self):
#define variable names
self.dict_data = { "1":"Sports" ,
"2":"Religious",
"3":"Theatre",
"4":"Shopping",
"5":"Picnic",}
def process_kMeans(self,n_clusters, data):
kmeans = KMeans(n_clusters=int(n_clusters)).fit(data)
return kmeans
def read_and_select_data(self, data_path):
#read data
df = pd.read_csv(data_path)
#get variable from user
print("selectable variables:\n1-Sports\n2-Religious\n3-Theatre\n4-Shopping\n5-Picnic\n")
x, y = input("Select x and y variable: ").split()
n_clusters = input("Select cluster number:")
# filter selected variables from dataset
data = df[[self.dict_data[x], self.dict_data[y]]]
return data, x, y, n_clusters
def visualize(self, kmeans, data, labels, x, y, save_path):
#visualize results
fig, ax = plt.subplots(figsize=(6.4, 6.4))
sns.scatterplot( x=data[data.columns[0]], y=data[data.columns[1]], hue=labels)
plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1],
marker="X", c="r", s=80, label="centroids")
ax.set(
title=self.dict_data[x] +" - "+ self.dict_data[y],
facecolor='white'
)
# plt.legend()
# plt.show()
canvas = FigureCanvas(fig)
canvas.draw()
buf = canvas.buffer_rgba()
image = np.asarray(buf)
plt.imsave(save_path, image)
plt.close("all")
def calculate_evalualion_metrics(self, data, kmeans, save_results_path):
dunn_index = calcDunnIndex(data.values, kmeans.cluster_centers_)
# wcss = kmeans.inertia_
wcss = calculateWCSS(data.values, kmeans.cluster_centers_, kmeans.labels_)
bcss = calculateBCSS(kmeans.cluster_centers_)
evaluation_metrics= ["dunn_index", "wcss", "bcss"]
evaluation_values = [dunn_index, wcss, bcss]
classes = [ "Class_"+str(cluster) for cluster in kmeans.labels_ ]
records = [ "Record_"+str(cluster) for cluster in range(1, len(data)+1) ]
count_dict = Counter(kmeans.labels_)
count_dict_ordered = dict(sorted(count_dict.items(), key=lambda x: x[0]))
c_names = ["Class_"+str(cluster) for cluster in count_dict_ordered.keys()]
c_counts = [str(cluster)+" Records" for cluster in count_dict_ordered.values()]
df_1 = pd.DataFrame(zip(records, classes))
df_2 = pd.DataFrame(zip(c_names, c_counts))
df_3 = pd.DataFrame(zip(evaluation_metrics, evaluation_values))
result_df = | pd.concat([df_1, df_2, df_3]) | pandas.concat |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
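# TextParser should accept already-tokenised rows and produce the same frame as read_csv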
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep=r'\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
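# write each separator/encoding combination to a real file, then compare against decoding the same text from an in-memory UTF-8 buffer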
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
encoded = dat.encode(enc)
with open(path, 'wb') as f:
f.write(encoded)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = | DataFrame({'A': [0, 0], 'B': [0, np.nan]}) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
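# appending rows via .loc with a dict should keep the per-column dtypes declared above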
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_setitem_listlike_indexer_duplicate_columns_not_equal_length(self):
# GH#39403
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11]], columns=["a", "b"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df[["a", "b"]] = rhs
def test_setitem_intervals(self):
df = DataFrame({"A": range(10)})
ser = cut(df["A"], 5)
assert isinstance(ser.cat.categories, IntervalIndex)
# B & D end up as Categoricals
# the remainder are converted to in-line objects
# containing an IntervalIndex.values
df["B"] = ser
df["C"] = np.array(ser)
df["D"] = ser.values
df["E"] = np.array(ser.values)
df["F"] = ser.astype(object)
assert is_categorical_dtype(df["B"].dtype)
assert is_interval_dtype(df["B"].cat.categories)
assert is_categorical_dtype(df["D"].dtype)
assert is_interval_dtype(df["D"].cat.categories)
# These go through the Series constructor and so get inferred back
# to IntervalDtype
assert is_interval_dtype(df["C"])
assert is_interval_dtype(df["E"])
# But the Series constructor doesn't do inference on Series objects,
# so setting df["F"] doesn't get cast back to IntervalDtype
assert is_object_dtype(df["F"])
# they compare equal as Index
# when converted to numpy objects
c = lambda x: Index(np.array(x))
tm.assert_index_equal(c(df.B), c(df.B))
tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
tm.assert_index_equal(c(df.C), c(df.D), check_names=False)
# B & D are the same Series
tm.assert_series_equal(df["B"], df["B"])
tm.assert_series_equal(df["B"], df["D"], check_names=False)
# C & E are the same Series
tm.assert_series_equal(df["C"], df["C"])
tm.assert_series_equal(df["C"], df["E"], check_names=False)
def test_setitem_categorical(self):
# GH#35369
df = DataFrame({"h": Series(list("mn")).astype("category")})
df.h = df.h.cat.reorder_categories(["n", "m"])
expected = DataFrame(
{"h": Categorical(["m", "n"]).reorder_categories(["n", "m"])}
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_empty_listlike(self):
# GH#17101
index = Index([], name="idx")
result = DataFrame(columns=["A"], index=index)
result["A"] = []
expected = DataFrame(columns=["A"], index=index)
tm.assert_index_equal(result.index, expected.index)
@pytest.mark.parametrize(
"cols, values, expected",
[
(["C", "D", "D", "a"], [1, 2, 3, 4], 4), # with duplicates
(["D", "C", "D", "a"], [1, 2, 3, 4], 4), # mixed order
(["C", "B", "B", "a"], [1, 2, 3, 4], 4), # other duplicate cols
(["C", "B", "a"], [1, 2, 3], 3), # no duplicates
(["B", "C", "a"], [3, 2, 1], 1), # alphabetical order
(["C", "a", "B"], [3, 2, 1], 2), # in the middle
],
)
def test_setitem_same_column(self, cols, values, expected):
# GH#23239
df = DataFrame([values], columns=cols)
df["a"] = df["a"]
result = df["a"].values[0]
assert result == expected
def test_setitem_multi_index(self):
# GH#7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ["jim", "joe", "jolie"], ["first", "last"], ["left", "center", "right"]
cols = MultiIndex.from_product(it)
index = date_range("20141006", periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df["jim"] = df["jolie"].loc[i, ::-1]
tm.assert_frame_equal(df["jim"], df["jolie"])
np.random.shuffle(j)
df[("joe", "first")] = df[("jolie", "last")].loc[i, j]
tm.assert_frame_equal(df[("joe", "first")], df[("jolie", "last")])
np.random.shuffle(j)
df[("joe", "last")] = df[("jolie", "first")].loc[i, j]
tm.assert_frame_equal(df[("joe", "last")], df[("jolie", "first")])
@pytest.mark.parametrize(
"columns,box,expected",
[
(
["A", "B", "C", "D"],
7,
DataFrame(
[[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "D"],
[7, 8],
DataFrame(
[[1, 2, 7, 8], [3, 4, 7, 8], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "B", "C"],
np.array([7, 8, 9], dtype=np.int64),
DataFrame([[7, 8, 9], [7, 8, 9], [7, 8, 9]], columns=["A", "B", "C"]),
),
(
["B", "C", "D"],
[[7, 8, 9], [10, 11, 12], [13, 14, 15]],
DataFrame(
[[1, 7, 8, 9], [3, 10, 11, 12], [5, 13, 14, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["C", "A", "D"],
np.array([[7, 8, 9], [10, 11, 12], [13, 14, 15]], dtype=np.int64),
DataFrame(
[[8, 2, 7, 9], [11, 4, 10, 12], [14, 6, 13, 15]],
columns=["A", "B", "C", "D"],
),
),
(
["A", "C"],
DataFrame([[7, 8], [9, 10], [11, 12]], columns=["A", "C"]),
DataFrame(
[[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=["A", "B", "C"]
),
),
],
)
def test_setitem_list_missing_columns(self, columns, box, expected):
# GH#29334
df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=["A", "B"])
df[columns] = box
tm.assert_frame_equal(df, expected)
def test_setitem_list_of_tuples(self, float_frame):
tuples = list(zip(float_frame["A"], float_frame["B"]))
float_frame["tuples"] = tuples
result = float_frame["tuples"]
expected = Series(tuples, index=float_frame.index, name="tuples")
tm.assert_series_equal(result, expected)
def test_setitem_iloc_generator(self):
# GH#39614
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer] = 1
expected = DataFrame({"a": [1, 1, 1], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
def test_setitem_iloc_two_dimensional_generator(self):
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
indexer = (x for x in [1, 2])
df.iloc[indexer, 1] = 1
expected = DataFrame({"a": [1, 2, 3], "b": [4, 1, 1]})
tm.assert_frame_equal(df, expected)
class TestSetitemTZAwareValues:
@pytest.fixture
def idx(self):
naive = DatetimeIndex(["2013-1-1 13:00", "2013-1-2 14:00"], name="B")
idx = naive.tz_localize("US/Pacific")
return idx
@pytest.fixture
def expected(self, idx):
expected = Series(np.array(idx.tolist(), dtype="object"), name="B")
assert expected.dtype == idx.dtype
return expected
def test_setitem_dt64series(self, idx, expected):
# convert to utc
df = DataFrame(np.random.randn(2, 1), columns=["A"])
df["B"] = idx
with tm.assert_produces_warning(FutureWarning) as m:
df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
msg = "do 'idx.tz_convert(None)' before calling"
assert msg in str(m[0].message)
result = df["B"]
comp = Series(idx.tz_convert("UTC").tz_localize(None), name="B")
tm.assert_series_equal(result, comp)
def test_setitem_datetimeindex(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# assign to frame
df["B"] = idx
result = df["B"]
tm.assert_series_equal(result, expected)
def test_setitem_object_array_of_tzaware_datetimes(self, idx, expected):
# setting a DataFrame column with a tzaware DTI retains the dtype
df = DataFrame(np.random.randn(2, 1), columns=["A"])
# object array of datetimes with a tz
df["B"] = idx.to_pydatetime()
result = df["B"]
tm.assert_series_equal(result, expected)
class TestDataFrameSetItemWithExpansion:
# TODO(ArrayManager) update parent (_maybe_update_cacher)
@td.skip_array_manager_not_yet_implemented
def test_setitem_listlike_views(self):
# GH#38148
df = DataFrame({"a": [1, 2, 3], "b": [4, 4, 6]})
# get one column as a view of df
ser = df["a"]
# add columns with list-like indexer
df[["c", "d"]] = np.array([[0.1, 0.2], [0.3, 0.4], [0.4, 0.5]])
# edit in place the first column to check view semantics
df.iloc[0, 0] = 100
expected = Series([100, 2, 3], name="a")
tm.assert_series_equal(ser, expected)
def test_setitem_string_column_numpy_dtype_raising(self):
# GH#39010
df = DataFrame([[1, 2], [3, 4]])
df["0 - Name"] = [5, 6]
expected = DataFrame([[1, 2, 5], [3, 4, 6]], columns=[0, 1, "0 - Name"])
tm.assert_frame_equal(df, expected)
def test_setitem_empty_df_duplicate_columns(self):
# GH#38521
df = DataFrame(columns=["a", "b", "b"], dtype="float64")
df.loc[:, "a"] = list(range(2))
expected = DataFrame(
[[0, np.nan, np.nan], [1, np.nan, np.nan]], columns=["a", "b", "b"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_with_expansion_categorical_dtype(self):
# assignment
df = DataFrame(
{"value": np.array(np.random.randint(0, 10000, 100), dtype="int32")}
)
labels = Categorical([f"{i} - {i + 499}" for i in range(0, 10000, 500)])
df = df.sort_values(by=["value"], ascending=True)
ser = cut(df.value, range(0, 10500, 500), right=False, labels=labels)
cat = ser.values
# setting with a Categorical
df["D"] = cat
str(df)
result = df.dtypes
expected = Series(
[np.dtype("int32"), CategoricalDtype(categories=labels, ordered=False)],
index=["value", "D"],
)
tm.assert_series_equal(result, expected)
# setting with a Series
df["E"] = ser
str(df)
result = df.dtypes
expected = Series(
[
np.dtype("int32"),
CategoricalDtype(categories=labels, ordered=False),
CategoricalDtype(categories=labels, ordered=False),
],
index=["value", "D", "E"],
)
tm.assert_series_equal(result, expected)
result1 = df["D"]
result2 = df["E"]
tm.assert_categorical_equal(result1._mgr.array, cat)
# sorting
ser.name = "E"
tm.assert_series_equal(result2.sort_index(), ser.sort_index())
def test_setitem_scalars_no_index(self):
# GH#16823 / GH#17894
df = DataFrame()
df["foo"] = 1
expected = DataFrame(columns=["foo"]).astype(np.int64)
tm.assert_frame_equal(df, expected)
def test_setitem_newcol_tuple_key(self, float_frame):
assert (
"A",
"B",
) not in float_frame.columns
float_frame["A", "B"] = float_frame["A"]
assert ("A", "B") in float_frame.columns
result = float_frame["A", "B"]
expected = float_frame["A"]
tm.assert_series_equal(result, expected, check_names=False)
def test_frame_setitem_newcol_timestamp(self):
# GH#2155
columns = date_range(start="1/1/2012", end="2/1/2012", freq=BDay())
data = DataFrame(columns=columns, index=range(10))
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works, mostly a smoke-test
assert np.isnan(data[ts]).all()
class TestDataFrameSetItemSlicing:
def test_setitem_slice_position(self):
# GH#31469
df = DataFrame(np.zeros((100, 1)))
df[-4:] = 1
arr = np.zeros((100, 1))
arr[-4:] = 1
expected = DataFrame(arr)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.setitem, tm.iloc])
@pytest.mark.parametrize("box", [Series, np.array, list])
@pytest.mark.parametrize("n", [1, 2, 3])
def test_setitem_broadcasting_rhs(self, n, box, indexer):
# GH#40440
# TODO: Add pandas array as box after GH#40933 is fixed
df = | DataFrame([[1, 3, 5]] + [[2, 4, 6]] * n, columns=["a", "b", "c"]) | pandas.DataFrame |
from operator import itemgetter
import os
import json
import logging
from datetime import datetime
from typing import Iterable, List, Optional
from marshmallow.fields import Field
from pytz import timezone
from pathlib import Path
from flask_restx.fields import MarshallingError, Raw
from datetime import time
from webargs.core import ArgMap, Parser
from werkzeug.routing import BaseConverter, ValidationError
from geopy.geocoders import Nominatim
import pandas as pd
import Levenshtein as lev
from .config import settings
from .reader import str_to_date
logger = logging.getLogger(__name__)
DZ = timezone('Africa/Algiers')
def today(tz=DZ):
dt_now = datetime.now(tz=tz)
today = dt_now.date()
return today.isoformat()
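# Example (illustrative value): today() -> "2021-05-14", the current date in the Africa/Algiers timezone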
def argmap_to_swagger_params(argmap: ArgMap, req=None):
parser = Parser()
schema = parser._get_schema(argmap, req)
params = {}
for name, field in schema.fields.items():
params[name] = {
'description': field.metadata.get('description', name.capitalize()),
'type': field_to_type(field)
}
return params
def field_to_type(field: Field):
# TODO: improve this by using OBJ_TYPE and num_type when available
return field.__class__.__name__.lower()
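# Example (illustrative): field_to_type(fields.Integer()) == "integer"  # the field class name, lower-cased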
def read_mawaqit_for_wilayas(directory):
mawaqit_for_wilayas = {}
for f in os.listdir(directory):
path = os.path.join(directory, f)
wilaya = Path(path).stem
mawaqit_for_wilayas[wilaya] = pd.read_csv(path, index_col=False)
return mawaqit_for_wilayas
def read_wilayas():
with open(settings.wilayas_file) as f:
return json.load(f)
def get_wilaya(code_or_name: str, wilayas: Iterable[dict] = None):
if wilayas is None:
wilayas = read_wilayas()
# convert the names found in the ministry PDFs to the ones found on wikipedia
if code_or_name in settings.rename:
code_or_name = settings.rename.get(code_or_name)
for wilaya in wilayas:
        # TODO: don't compare strictly; use a less rigid matching to handle the naming
        # differences between the data from marw.dz and the data from wikipedia.com
if code_or_name in wilaya.values():
return wilaya
return {
'code': code_or_name,
'arabic_name': code_or_name,
'english_name': code_or_name,
}
def look_for_rename(old_names: List[str], wilayas: Iterable[dict], path: str):
rename = {}
for old_name in old_names:
closest_name, _ = best_match(old_name, [wilaya['arabic_name'] for wilaya in wilayas])
rename[old_name] = closest_name
with open(path, 'w') as f:
output = 'rename:\n'
for x1, x2 in rename.items():
output = output + f" '{x1}': '{x2}'\n"
f.write(output)
def best_match(name: str, names: Iterable[str]):
distances = []
for other_name in names:
distance = lev.distance(name, other_name)
distances.append((other_name, distance))
best, minimum_distance = min(distances, key=itemgetter(1))
return best, minimum_distance
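# Example (illustrative): best_match("Algier", ["Algiers", "Oran"]) -> ("Algiers", 1)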
def read_mawaqit_for_wilayas_v2(directory):
mawaqit_for_wilayas = []
for f in os.listdir(directory):
path = os.path.join(directory, f)
arabic_name = Path(path).stem
wilaya = get_wilaya(arabic_name)
mawaqit_for_wilayas.append((wilaya, pd.read_csv(path, index_col=False)))
return mawaqit_for_wilayas
def get_wilayas_values(wilayas):
arabic_names = [w['arabic_name'] for w in wilayas]
french_names = [w['english_name'] for w in wilayas]
codes = [w['code'] for w in wilayas]
accepted_values = arabic_names + french_names + codes
return accepted_values
def create_mawaqits(mawaqit_for_wilayas, wilaya_column_name):
dfs = []
for wilaya, mawaqit in mawaqit_for_wilayas.items():
mawaqit[wilaya_column_name] = wilaya
dfs.append(mawaqit)
mawaqits = | pd.concat(dfs) | pandas.concat |
# Run from here at startup to get the necessary functions and dataframes
import Functions
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
coin_list_NA = ['BTC', 'BCHNA', 'CardonaNA', 'dogecoinNA', 'EOS_RNA', 'ETHNA', 'LTCNA', 'XRP_RNA', 'MoneroNA',
'BNB_RNA',
'IOTANA', 'TEZOSNA', ]
coin_list = ['BTC', 'BCH', 'Cardona', 'dogecoin', 'EOS', 'ETH', 'LTC', 'XRP', 'Monero', 'BNB', 'IOTA', 'TEZOS', ]
dfAllCoins = pd.DataFrame()
dfWMR = pd.read_csv('Data/' + coin_list[0] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfWMR['Date'] = pd.to_datetime(dfWMR['Date'], format='%b %d, %Y')
dfWMR['Date'] = pd.DatetimeIndex(dfWMR['Date']).date
dfWMR.index = dfWMR['Date']
dfWMR = dfWMR.sort_index()
for column in dfWMR.columns:
dfWMR = dfWMR.drop(columns=column)
dfPrices = dfWMR
dfReturns = dfWMR
dfMarketCap = dfWMR
dfPositive = dfWMR
dfNeutral = dfWMR
dfNegative = dfWMR
dfMOM3 = dfWMR
dfMOM5 = dfWMR
dfMOM7 = dfWMR
dfMOM14 = dfWMR
for i in range(0, len(coin_list)):
dfMarket = pd.read_csv('Data/' + coin_list[i] + '_marketdata.csv', sep=';', thousands=',', decimal='.')
dfMarket['Date'] = pd.to_datetime(dfMarket['Date'], format='%b %d, %Y')
dfMarket['Date'] = pd.DatetimeIndex(dfMarket['Date']).date
dfMarket.index = dfMarket['Date']
dfMarket = dfMarket.sort_index()
dfMarket['Return'] = dfMarket['Close**'].pct_change()
dfMarket = dfMarket[1:]
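    # Momentum features: trailing k-day cumulative returns, computed as rolling sums of daily returns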
dfMarket['Mom3'] = dfMarket.Return.rolling(3).sum()
dfMarket['Mom5'] = dfMarket.Return.rolling(5).sum()
dfMarket['Mom7'] = dfMarket.Return.rolling(7).sum()
dfMarket['Mom14'] = dfMarket.Return.rolling(14).sum()
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Return']
dfReturns = dfReturns.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Close**']
dfPrices = dfPrices.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom3']
dfMOM3 = dfMOM3.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom5']
dfMOM5 = dfMOM5.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom7']
dfMOM7 = dfMOM7.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Mom14']
dfMOM14 = dfMOM14.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfMarket['Market Cap']
dfTemp[coin_list[i]] = dfTemp[coin_list[i]].fillna(method = 'ffill')
dfMarketCap = dfMarketCap.merge(dfTemp, how='left', left_index=True, right_index=True)
dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=',')
if coin_list[i] == 'BTC':
# dfSentiment = pd.read_csv('Data/' + coin_list_NA[i] + '_Actual_Sentiment.csv', index_col=0, sep=';')
dfSentiment = pd.read_csv('Data/All_Merged.csv', index_col=0, sep=',')
dfSentiment = dfSentiment[['positive_comment', 'neutral_comment', 'negative_comment']]
dfSentiment['Date'] = dfSentiment.index
dfSentiment['Date'] = pd.to_datetime(dfSentiment['Date'])
dfSentiment.index = pd.DatetimeIndex(dfSentiment['Date']).date
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['positive_comment']
dfPositive = dfPositive.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['negative_comment']
dfNegative = dfNegative.merge(dfTemp, how='left', left_index=True, right_index=True)
dfTemp = pd.DataFrame()
dfTemp[coin_list[i]] = dfSentiment['neutral_comment']
dfNeutral = dfNeutral.merge(dfTemp, how='left', left_index=True, right_index=True)
dfMarket['Coin'] = coin_list[i]
del dfSentiment['Date']
dfData = dfMarket.merge(dfSentiment, how='inner', left_index=True, right_index=True)
dfData = dfData.reset_index()
del dfData['index']
dfAllCoins = dfAllCoins.append(dfData)
dfAllCoins = dfAllCoins.drop(['created_utc'], axis=1)
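# Weighted market return (WMR): each coin's daily return is value-weighted by the previous
# day's market cap, i.e. WMR_t = sum_i(cap_{i,t-1} * r_{i,t}) / sum_i(cap_{i,t-1})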
dfWMR = pd.DataFrame()
dfReturnsLag = dfReturns.iloc[1:,:]
dfMarketCapLag = dfMarketCap.iloc[:-1,:]
dfMarketCapLag.index = dfReturnsLag.index
dfWMR['WMR'] = dfReturnsLag.multiply(dfMarketCapLag).sum(axis=1) / dfMarketCapLag.sum(axis=1)
dfPositiveSentimentSignal = pd.DataFrame()
dfNegativeSentimentSignal = pd.DataFrame()
dfAveragePositiveSentimentSignal = pd.DataFrame()
dfAverageNegativeSentimentSignal = pd.DataFrame()
dfActiveCoin = | pd.DataFrame() | pandas.DataFrame |
# This example script shows how to utilize idealreport to create various interactive HTML plots.
# The framework generates a "report" that is an HTML file with supporting js files.
#
# These are the steps to generate the example report:
# 1. Use python 2.7 and install the requirements using "pip install htmltag pandas"
# 2. Run this script "python sample_plots.py"
# 3. Open the resulting HTML file in a browser "reports/sample_plots.html"
#
# This sample shows how to utilize the framework via:
# (1) create_html.py functions in create_html.py
# (2) report.py Reporter class in report.py which wraps functions in (1)
# The create_html functions are more general, but often require more verbose code.
#
# abbreviations in the comments:
# - df = a pandas DataFrame
# - ps = plot specification (python dictionary) used by create_html
import os
import htmltag
import idealreport as ir
import numpy as np
import pandas as pd
# data stored in pandas DataFrames (df)
# df: bar charts
df_bar = | pd.DataFrame(
{"Stat 1": [2.0, 1.6, 0.9, 0.2, -1.3], "Stat 2": [1.1, 0.7, -0.8, -1.4, 0.4], "Value 1": [8, 10, 50, 85, 42], "Value 2": [100, 50, 10, 100, 25]},
index=["Entity 1", "Entity 2", "Entity 3", "Entity 4", "Entity 5"],
) | pandas.DataFrame |
"""
Created on Wed Nov 07 2018
@author: Analytics Club at ETH <EMAIL>
"""
import itertools
import time
from time import localtime, strftime
from os import path, mkdir, rename
import sys
from sklearn.metrics import (accuracy_score, confusion_matrix, classification_report)
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from src.utils.evaluation import plot_confusion_matrix
plt.style.use('ggplot')
def test_model(model, name_model, x_train, y_train, x_test, y_test, details=False,
normalize=False, weights=None, return_model=False, lib='scikit-learn', fit_params={}):
"""
Function that does a detailed investigation of a given model. Confusion matrices are generated
and various metrics are shown.
Currently supported libraries: 'scikit-learn' (including Pipeline), 'keras'.
For language classification additional features are implemented and recognized by
pipelines named steps, if name:
- 'vect': (CountVectorizer) word counts are displayed for most and least frequent words
- 'tfidf': (TfidfTransformer) words with highest and lowest TFIDF scores are displayed
- 'multNB': (MultinomialNB) words with highest and lowest weights are shown
Parameters
----------
model : object with attributes fit & predict (+ others...)
The model being tested
name_model : string
Name of the model being tested
x_train : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y_train : array-like, shape (n_samples) or (n_samples, n_features)
Target relative to x_train for classification
x_test : array-like, shape (n_samples, n_features)
        Test vector, where n_samples is the number of samples and
n_features is the number of features.
y_test : array-like, shape (n_samples) or (n_samples, n_features)
Target relative to x_test for classification
details : bool
If True evaluation about every parameter configuration is shown
default False
normalize : bool
        Specifies whether or not the confusion matrix is normalized.
default False
weights : dict
weights used in fit method. For example for KerasClassifier
model.fit(x_train, y_train, class_weight=weights)
return_model : bool
model is returned if True
default False
lib : string
specifies which library the model belongs to
Possible choices are: 'scikit-learn' (default), 'keras'
fit_params : dict
fitting parameters for the classifier - only works for lib="keras",
        pass weights via the separate argument, as the class labels need to be encoded otherwise.
Returns
-------
model, if return_model True
"""
if lib == 'keras':
le = LabelEncoder()
y_test_dec = y_test
y_test = le.fit_transform(y_test)
y_train_dec = y_train
y_train = le.transform(y_train)
# Encode the class label for the weights
df = | pd.DataFrame(weights, index=[0]) | pandas.DataFrame |
import cv2
import os
import time
import face_recognition
import pickle
from mss import mss
from PIL import Image
import pandas as pd
import argparse
import configparser
## Captures the current screen and returns the image ready to be saved
## Optional parameter to set in case there's more than 1 monitor.
## If the value set is outside of the valid range, set the value to 1
## Returns a raw image of the screen
def screen_cap(mnum=1):
with mss() as sct:
if mnum >= len(sct.monitors):
mnum = 1
monitor = sct.monitors[mnum]
sct_img = sct.grab(monitor)
return Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
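## Example (illustrative): screen_cap().save("capture.png") saves a grab of monitor 1 as a PNG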
## Identifies faces and saves them into the imgdir directory
## Creates a temp dataframe with the Date, ElapsedSeconds, Name, and EngagementLevel
## imgfile: Image file with the faces that you want recognized.
## classdate: Date of the class
## secselapsed: Number of seconds elapsed in the recording so far
## imgdir: Directory to save the individual images in
## picklefile: opened face recognition file
## Returns the temp dataframe
def cycle(imgfile, classdate, secselapsed, imgdir, picklefile, emotionpickle, saveimage):
tempemotionframe = | pd.DataFrame(columns=['Date', 'ElapsedSeconds', 'Name', 'EmotionScore', 'EyeCount']) | pandas.DataFrame |
from cde.evaluation.empirical_eval.experiment_util import run_benchmark_train_test_fit_cv, run_benchmark_train_test_fit_cv_ml
import cde.evaluation.empirical_eval.datasets as datasets
from ml_logger import logger
import config
import pandas as pd
EXP_PREFIX = 'benchmark_empirical'
class Rule_of_thumb:
def __init__(self, scale_factor):
self.scale_factor = scale_factor
def __call__(self, n, d):
return self.scale_factor * n ** (-1 / (4 + d))
def __str__(self):
return "rule_of_thumb_%.2f" % self.scale_factor
class Polynomial_Rate:
def __init__(self, scale_factor, order):
self.scale_factor = scale_factor
self.order = order
def __call__(self, n, d):
return self.scale_factor * n ** (-1 / (self.order + d))
def __str__(self):
return "polynomial_rate_%i_%.2f" % (self.order, self.scale_factor)
# setup model dict
adaptive_noise_functions = [Rule_of_thumb(1.0), Rule_of_thumb(0.7), Rule_of_thumb(0.5),
Polynomial_Rate(2.0, 1), Polynomial_Rate(1.0, 1), Polynomial_Rate(1.0, 2),
Polynomial_Rate(2.0, 2), Polynomial_Rate(1.0, 3), Polynomial_Rate(2.0, 3)]
x_noise_stds = [0.02, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
y_noise_stds = [0.02, 0.05, 0.1, 0.15, 0.2, 0.25]
MDN_standard_params = {'estimator': 'MixtureDensityNetwork', 'n_training_epochs': 1000, 'hidden_sizes': [(32,32)],
'weight_normalization': False, 'random_seed': 40}
KMN_standard_params = {'estimator': 'KernelMixtureNetwork', 'n_training_epochs': 1000, 'hidden_sizes': [(32,32)],
'weight_normalization': False, 'random_seed': 40}
NF_standard_params = {'estimator': 'NormalizingFlowEstimator', 'n_training_epochs': 1000, 'hidden_sizes': [(32,32)],
'weight_normalization': False, 'random_seed': 40}
model_dict = {
# ---------------- MDN ----------------
'MDN_cv': {**MDN_standard_params, 'x_noise_std': x_noise_stds, 'y_noise_std': y_noise_stds, 'dropout': [0.0, 0.2],
'n_centers': [5, 10, 20, 50]},
# ---------------- KMN ----------------
'KMN_cv': {**KMN_standard_params, 'x_noise_std': x_noise_stds, 'y_noise_std': y_noise_stds, 'dropout': [0.0, 0.2],
'n_centers': [20, 50, 200]},
# ---------------- NF ------------------
'NF_cv': {**NF_standard_params, 'x_noise_std': x_noise_stds, 'y_noise_std': y_noise_stds, 'dropout': [0.0, 0.2],
'n_flows': [5, 10, 20, 50]},
'LSCDE_cv': {'estimator': 'LSConditionalDensityEstimation', 'bandwidth': [0.1, 0.2, 0.5, 0.7],
'n_centers': [500, 1000], 'regularization': [0.1, 0.5, 1.0, 4.0, 8.0], 'random_seed': 40},
}
def experiment():
logger.configure(log_directory=config.DATA_DIR, prefix=EXP_PREFIX, color='green')
# 1) EUROSTOXX
dataset = datasets.EuroStoxx50()
result_df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5, shuffle_splits=False,
n_folds=5, seed=22)
# 2) NYC Taxi
for n_samples in [10000]:
dataset = datasets.NCYTaxiDropoffPredict(n_samples=n_samples)
df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5, shuffle_splits=True,
n_folds=5, seed=22, n_jobs_inner=-1, n_jobc_outer=2)
result_df = | pd.concat([result_df, df], ignore_index=True) | pandas.concat |
import sys,os
#os.chdir("/Users/utkarshvirendranigam/Desktop/Homework/Project")
# required_packages=["PyQt5","re", "scipy","itertools","random","matplotlib","pandas","numpy","sklearn","pydotplus","collections","warnings","seaborn"]
#print(os.getcwd())
# for my_package in required_packages:
# try:
# command_string="conda install "+ my_package+ " --yes"
# os.system(command_string)
# except:
# count=1
from PyQt5.QtWidgets import (QMainWindow, QApplication, QWidget, QPushButton, QAction, QComboBox, QLabel,
QGridLayout, QCheckBox, QGroupBox, QVBoxLayout, QHBoxLayout, QLineEdit, QPlainTextEdit)
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot, QRect
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtCore import Qt
# from scipy import interp
from itertools import cycle, combinations
import random
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QSizePolicy, QFormLayout, QRadioButton, QScrollArea, QMessageBox
from PyQt5.QtGui import QPixmap
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import pandas as pd
import numpy as np
import pickle
from numpy.polynomial.polynomial import polyfit
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.compose import make_column_transformer
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc, log_loss, brier_score_loss
from sklearn.calibration import calibration_curve
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import feature_selection
from sklearn import metrics
from sklearn.preprocessing import label_binarize
from sklearn.model_selection import cross_val_predict
# Libraries to display decision tree
from pydotplus import graph_from_dot_data
import collections
from sklearn.tree import export_graphviz
import webbrowser
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from Preprocessing import PreProcessing
import random
import seaborn as sns
#%%-----------------------------------------------------------------------
import os
os.environ["PATH"] += os.pathsep + 'C:\\Program Files (x86)\\graphviz-2.38\\release\\bin'
#%%-----------------------------------------------------------------------
#::--------------------------------
# Default font size for all the windows
#::--------------------------------
font_size_window = 'font-size:18px'
class DecisionTree(QMainWindow):
#::--------------------------------------------------------------------------------
    # Implementation of Decision Tree Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(DecisionTree, self).__init__()
self.Title = "Decision Tree Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements to create a dashboard with
        # all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Decision Tree Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
self.btnDTFigure = QPushButton("View Tree")
self.btnDTFigure.setGeometry(QRect(60, 500, 75, 23))
self.btnDTFigure.clicked.connect(self.view_tree)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnDTFigure, 21, 1, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
        Decision Tree Classifier
        We populate the dashboard using the parameters chosen by the user
        The parameters are processed to run the scikit-learn Decision Tree algorithm
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
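        # The blocks below build the feature matrix: each checked feature's column from df is
        # concatenated column-wise onto self.list_corr_features.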
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
if len(self.list_corr_features) == 20:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
# -----------------------------------------------------------------------
filename = 'dt_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
        X_test = X[features_list]
# predicton on test using entropy
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for RandomForest
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
## Ghaph1 :
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
        y_pred_score = self.clf_entropy.predict_proba(X_test)
        for i in range(len(class_names)):
            for j in range(len(class_names)):
                self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
        # From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Random Forest')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
# convert the importances into one-dimensional 1darray with corresponding df column names as axis labels
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'rf_finalized_model.sav'
self.other_clf_rf = pickle.load(open(filename3, 'rb'))
y_pred_rf = self.other_clf_rf.predict(X_test)
self.accuracy_rf = accuracy_score(y_test, y_pred_rf) * 100
self.txtAccuracy_rf.setText(str(self.accuracy_rf))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
def view_tree(self):
'''
Executes the graphviz to create a tree view of the information
then it presents the graphic in a pdf formt using webbrowser
:return:None
'''
webbrowser.open_new(r'decision_tree_entropy.pdf')
class RandomForest(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of Random Forest Classifier using the happiness dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
    # update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(RandomForest, self).__init__()
self.Title = "Random Forest Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
        # Create the canvas and all the elements to create a dashboard with
        # all the necessary elements to present the results from the algorithm
        # The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Random Forest Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
        self.lblMaxDepth = QLabel('Maximum Depth :')
self.txtMaxDepth = QLineEdit(self)
self.txtMaxDepth.setText("3")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
# We create a checkbox for each feature
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.lblMaxDepth, 20, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtMaxDepth, 20, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_dt = QLineEdit()
self.other_models.layout.addRow('Logistic:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Decision tree:', self.txtAccuracy_dt)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : Importance of Features
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('Importance of Features')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Random Forest Classifier
        We populate the dashboard using the parameters chosen by the user
        The parameters are processed to run the scikit-learn Random Forest algorithm
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
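        # The blocks below build the feature matrix: each checked feature's column from df is
        # concatenated column-wise onto self.list_corr_features.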
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
vmax_depth = float(self.txtMaxDepth.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
filename = 'rf_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test = X[features_list]
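# Note: the model is loaded pre-trained from disk and evaluated on the full
# label vector y and feature matrix X[features_list]; the test-percentage and
# max-depth inputs above are not used to refit it in this method.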
# -----------------------------------------------------------------------
# prediction on the test data with the loaded Random Forest model
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for RandomForest
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
## Graph 1:
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
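# Annotate every confusion-matrix cell with its count; the class probabilities
# (predict_proba) captured inside this loop are reused for the ROC curves below.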
for i in range(len(class_names)):
for j in range(len(class_names)):
y_pred_score = self.clf_entropy.predict_proba(X_test)
self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
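# pd.get_dummies one-hot encodes the binary target so that a ROC curve can be
# computed for each class as well as the micro-average.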
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
# From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Random Forest')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - 3 Feature Importances
#####################################
# get feature importances
importances = self.clf_entropy.feature_importances_
# convert the importances into a pandas Series indexed by the selected feature column names
f_importances = pd.Series(importances, self.list_corr_features.columns)
# sort the array in descending order of the importances, only show the first 10
f_importances.sort_values(ascending=False, inplace=True)
f_importances = f_importances[0:10]
X_Features = f_importances.index
y_Importance = list(f_importances)
self.ax3.barh(X_Features, y_Importance)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'dt_finalized_model.sav'
self.other_clf_dt = pickle.load(open(filename3, 'rb'))
y_pred_dt = self.other_clf_dt.predict(X_test)
self.accuracy_dt = accuracy_score(y_test, y_pred_dt) * 100
self.txtAccuracy_dt.setText(str(self.accuracy_dt))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
class LogisticReg(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of the Logistic Regression Classifier on the loan default dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
# update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(LogisticReg, self).__init__()
self.Title = "Logistic Regression Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and build a dashboard with
# all the necessary elements to present the results from the algorithm
# The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Logistic Regression Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
# Add the checkbox of each feature to the layout
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_dt = QLineEdit()
self.txtAccuracy_gb = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Decision Tree:', self.txtAccuracy_dt)
self.other_models.layout.addRow('Gradient Boosting:', self.txtAccuracy_gb)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : k-fold Cross validation
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('K-fold cross validation')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Logistic Regression Classifier
We populate the dashboard using the parameters chosen by the user
The parameters are processed to evaluate the scikit-learn Logistic Regression model
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
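# Note: the 25 near-identical checkbox blocks above could be collapsed into a
# loop. A minimal sketch, kept as a comment so the original flow is unchanged
# and assuming the checkboxes keep the feature0..feature24 attribute naming:
#
#     selected = [features_list[i] for i in range(len(features_list))
#                 if getattr(self, 'feature{}'.format(i)).isChecked()]
#     self.list_corr_features = df[selected]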
vtest_per = float(self.txtPercentTest.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
filename = 'lr_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test = X[features_list]
# -----------------------------------------------------------------------
# prediction on the test data with the loaded Logistic Regression model
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for the Logistic Regression model
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
## Graph 1:
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
for i in range(len(class_names)):
for j in range(len(class_names)):
y_pred_score = self.clf_entropy.predict_proba(X_test)
self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
# From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Logistic Regression')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - cross validation
#####################################
# get cross validation
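# 5-fold cross-validated accuracy of the loaded model on the selected data;
# the per-fold scores feed the boxplot below.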
score2 = cross_val_score(self.clf_entropy, X_test, y_test, cv=5, scoring='accuracy', n_jobs=-1)
# repeats=range(1,15)
# results=list()
# for r in repeats:
self.ax3.boxplot(score2)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'dt_finalized_model.sav'
self.other_clf_dt = pickle.load(open(filename2, 'rb'))
y_pred_dt = self.other_clf_dt.predict(X_test)
self.accuracy_dt = accuracy_score(y_test, y_pred_dt) * 100
self.txtAccuracy_dt.setText(str(self.accuracy_dt))
filename3 = 'rf_finalized_model.sav'
self.other_clf_rf = pickle.load(open(filename3, 'rb'))
y_pred_rf = self.other_clf_rf.predict(X_test)
self.accuracy_rf = accuracy_score(y_test, y_pred_rf) * 100
self.txtAccuracy_rf.setText(str(self.accuracy_rf))
filename4 = 'gb_finalized_model.sav'
self.other_clf_gb = pickle.load(open(filename4, 'rb'))
y_pred_gb = self.other_clf_gb.predict(X_test)
self.accuracy_gb = accuracy_score(y_test, y_pred_gb) * 100
self.txtAccuracy_gb.setText(str(self.accuracy_gb))
class GradientBoosting(QMainWindow):
#::--------------------------------------------------------------------------------
# Implementation of the Gradient Boosting Classifier on the loan default dataset
# the methods in this class are
# _init_ : initialize the class
# initUi : creates the canvas and all the elements in the canvas
# update : populates the elements of the canvas based on the parameters
# chosen by the user
#::---------------------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
super(GradientBoosting, self).__init__()
self.Title = "Gradient Boosting Classifier"
self.initUi()
def initUi(self):
#::-----------------------------------------------------------------
# Create the canvas and build a dashboard with
# all the necessary elements to present the results from the algorithm
# The canvas is divided using a grid layout to facilitate the drawing
# of the elements
#::-----------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QGridLayout(self.main_widget)
self.groupBox1 = QGroupBox('Gradient Boosting Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0],self)
self.feature1 = QCheckBox(features_list[1],self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4],self)
self.feature5 = QCheckBox(features_list[5],self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.lblPercentTest = QLabel('Percentage for Test :')
self.lblPercentTest.adjustSize()
self.txtPercentTest = QLineEdit(self)
self.txtPercentTest.setText("30")
self.btnExecute = QPushButton("Run Model")
self.btnExecute.setGeometry(QRect(60, 500, 75, 23))
self.btnExecute.clicked.connect(self.update)
# Add the checkbox of each feature to the layout
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 3, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 4, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 4, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 5, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 5, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 6, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 6, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 7, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 7, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 8, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 8, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 9, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 9, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 10, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 10, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 11, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 11, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 12, 0, 1, 1)
self.groupBox1Layout.addWidget(self.lblPercentTest, 19, 0, 1, 1)
self.groupBox1Layout.addWidget(self.txtPercentTest, 19, 1, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute, 21, 0, 1, 1)
self.groupBox2 = QGroupBox('Measurements:')
self.groupBox2Layout = QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
# self.groupBox2.setMinimumSize(400, 100)
self.current_model_summary = QWidget(self)
self.current_model_summary.layout = QFormLayout(self.current_model_summary)
self.txtCurrentAccuracy = QLineEdit()
self.txtCurrentPrecision = QLineEdit()
self.txtCurrentRecall = QLineEdit()
self.txtCurrentF1score = QLineEdit()
self.current_model_summary.layout.addRow('Accuracy:', self.txtCurrentAccuracy)
self.current_model_summary.layout.addRow('Precision:', self.txtCurrentPrecision)
self.current_model_summary.layout.addRow('Recall:', self.txtCurrentRecall)
self.current_model_summary.layout.addRow('F1 Score:', self.txtCurrentF1score)
self.groupBox2Layout.addWidget(self.current_model_summary)
self.groupBox3 = QGroupBox('Other Models Accuracy:')
self.groupBox3Layout = QVBoxLayout()
self.groupBox3.setLayout(self.groupBox3Layout)
self.other_models = QWidget(self)
self.other_models.layout = QFormLayout(self.other_models)
self.txtAccuracy_lr = QLineEdit()
self.txtAccuracy_dt = QLineEdit()
self.txtAccuracy_rf = QLineEdit()
self.other_models.layout.addRow('Decision Tree:', self.txtAccuracy_dt)
self.other_models.layout.addRow('Logistic Regression:', self.txtAccuracy_lr)
self.other_models.layout.addRow('Random Forest:', self.txtAccuracy_rf)
self.groupBox3Layout.addWidget(self.other_models)
#::-------------------------------------
# Graphic 1 : Confusion Matrix
#::-------------------------------------
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.groupBoxG1 = QGroupBox('Confusion Matrix')
self.groupBoxG1Layout= QVBoxLayout()
self.groupBoxG1.setLayout(self.groupBoxG1Layout)
self.groupBoxG1Layout.addWidget(self.canvas)
#::---------------------------------------------
# Graphic 2 : ROC Curve
#::---------------------------------------------
self.fig2 = Figure()
self.ax2 = self.fig2.add_subplot(111)
self.axes2 = [self.ax2]
self.canvas2 = FigureCanvas(self.fig2)
self.canvas2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas2.updateGeometry()
self.groupBoxG2 = QGroupBox('ROC Curve')
self.groupBoxG2Layout = QVBoxLayout()
self.groupBoxG2.setLayout(self.groupBoxG2Layout)
self.groupBoxG2Layout.addWidget(self.canvas2)
#::-------------------------------------------
# Graphic 3 : k-fold Cross validation
#::-------------------------------------------
self.fig3 = Figure()
self.ax3 = self.fig3.add_subplot(111)
self.axes3 = [self.ax3]
self.canvas3 = FigureCanvas(self.fig3)
self.canvas3.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas3.updateGeometry()
self.groupBoxG3 = QGroupBox('K-fold cross validation')
self.groupBoxG3Layout = QVBoxLayout()
self.groupBoxG3.setLayout(self.groupBoxG3Layout)
self.groupBoxG3Layout.addWidget(self.canvas3)
#::--------------------------------------------
# Graphic 4 : ROC Curve by class
#::--------------------------------------------
self.fig4 = Figure()
self.ax4 = self.fig4.add_subplot(111)
self.axes4 = [self.ax4]
self.canvas4 = FigureCanvas(self.fig4)
self.canvas4.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.canvas4.updateGeometry()
self.groupBoxG4 = QGroupBox('ROC Curve by Class')
self.groupBoxG4Layout = QVBoxLayout()
self.groupBoxG4.setLayout(self.groupBoxG4Layout)
self.groupBoxG4Layout.addWidget(self.canvas4)
#::-------------------------------------------------
# End of graphs
#::-------------------------------------------------
self.layout.addWidget(self.groupBox1, 0, 0, 3, 2)
self.layout.addWidget(self.groupBoxG1, 0, 2, 1, 1)
self.layout.addWidget(self.groupBoxG3, 0, 3, 1, 1)
self.layout.addWidget(self.groupBoxG2, 1, 2, 1, 1)
self.layout.addWidget(self.groupBoxG4, 1, 3, 1, 1)
self.layout.addWidget(self.groupBox2, 2, 2, 1, 1)
self.layout.addWidget(self.groupBox3, 2, 3, 1, 1)
self.setCentralWidget(self.main_widget)
self.resize(1800, 1200)
self.show()
def update(self):
'''
Gradient Boosting Classifier
We populate the dashboard using the parameters chosen by the user
The parameters are processed to evaluate the scikit-learn Gradient Boosting model
then the results are presented in graphics and reports in the canvas
:return:None
'''
# processing the parameters
self.list_corr_features = pd.DataFrame([])
if self.feature0.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]],axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]],axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]],axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]],axis=1)
if self.feature4.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[4]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[4]]],axis=1)
if self.feature5.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[5]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[5]]],axis=1)
if self.feature6.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[6]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[6]]],axis=1)
if self.feature7.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[7]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[7]]],axis=1)
if self.feature8.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[8]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[8]]],axis=1)
if self.feature9.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[9]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[9]]],axis=1)
if self.feature10.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[10]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[10]]], axis=1)
if self.feature11.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[11]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[11]]], axis=1)
if self.feature12.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[12]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[12]]], axis=1)
if self.feature13.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[13]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[13]]], axis=1)
if self.feature14.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[14]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[14]]], axis=1)
if self.feature15.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[15]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[15]]], axis=1)
if self.feature16.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[16]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[16]]], axis=1)
if self.feature17.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[17]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[17]]], axis=1)
if self.feature18.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[18]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[18]]], axis=1)
if self.feature19.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[19]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[19]]], axis=1)
if self.feature20.isChecked():
if len(self.list_corr_features)==0:
self.list_corr_features = df[features_list[20]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[20]]],axis=1)
if self.feature21.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[21]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[21]]],axis=1)
if self.feature22.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[22]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[22]]],axis=1)
if self.feature23.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[23]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[23]]],axis=1)
if self.feature24.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[24]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[24]]],axis=1)
vtest_per = float(self.txtPercentTest.text())
# Clear the graphs to populate them with the new information
self.ax1.clear()
self.ax2.clear()
self.ax3.clear()
self.ax4.clear()
# self.txtResults.clear()
# self.txtResults.setUndoRedoEnabled(False)
vtest_per = vtest_per / 100
filename = 'gb_finalized_model.sav'
self.clf_entropy = pickle.load(open(filename, 'rb'))
y_test = y
X_test = X[features_list]
# -----------------------------------------------------------------------
# prediction on the test data with the loaded Gradient Boosting model
y_pred_entropy = self.clf_entropy.predict(X_test)
# confusion matrix for the Gradient Boosting model
conf_matrix = confusion_matrix(y_test, y_pred_entropy)
# accuracy score
self.ff_accuracy_score = accuracy_score(y_test, y_pred_entropy) * 100
self.txtCurrentAccuracy.setText(str(self.ff_accuracy_score))
# precision score
self.ff_precision_score = precision_score(y_test, y_pred_entropy) * 100
self.txtCurrentPrecision.setText(str(self.ff_precision_score))
# recall score
self.ff_recall_score = recall_score(y_test, y_pred_entropy) * 100
self.txtCurrentRecall.setText(str(self.ff_recall_score))
# f1_score
self.ff_f1_score = f1_score(y_test, y_pred_entropy)
self.txtCurrentF1score.setText(str(self.ff_f1_score))
#::------------------------------------
## Graph 1:
## Confusion Matrix
#::------------------------------------
class_names1 = ['', 'No', 'Yes']
self.ax1.matshow(conf_matrix, cmap=plt.cm.get_cmap('Blues', 14))
self.ax1.set_yticklabels(class_names1)
self.ax1.set_xticklabels(class_names1, rotation=90)
self.ax1.set_xlabel('Predicted label')
self.ax1.set_ylabel('True label')
for i in range(len(class_names)):
for j in range(len(class_names)):
y_pred_score = self.clf_entropy.predict_proba(X_test)
self.ax1.text(j, i, str(conf_matrix[i][j]))
self.fig.tight_layout()
self.fig.canvas.draw_idle()
#::----------------------------------------
## Graph 2 - ROC Curve
#::----------------------------------------
y_test_bin = pd.get_dummies(y_test).to_numpy()
n_classes = y_test_bin.shape[1]
# From the scikit-learn site
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_pred_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test_bin.ravel(), y_pred_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
lw = 2
self.ax2.plot(fpr[1], tpr[1], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[1])
self.ax2.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
self.ax2.set_xlim([0.0, 1.0])
self.ax2.set_ylim([0.0, 1.05])
self.ax2.set_xlabel('False Positive Rate')
self.ax2.set_ylabel('True Positive Rate')
self.ax2.set_title('ROC Curve Gradient Boosting')
self.ax2.legend(loc="lower right")
self.fig2.tight_layout()
self.fig2.canvas.draw_idle()
######################################
# Graph - cross validation
#####################################
# get cross validation
score2 = cross_val_score(self.clf_entropy, X_test, y_test, cv=5, scoring='accuracy', n_jobs=-1)
# repeats=range(1,15)
# results=list()
# for r in repeats:
self.ax3.boxplot(score2)
self.ax3.set_aspect('auto')
# show the plot
self.fig3.tight_layout()
self.fig3.canvas.draw_idle()
#::-----------------------------------------------------
# Graph 4 - ROC Curve by Class
#::-----------------------------------------------------
str_classes = ['No','Yes']
colors = cycle(['magenta', 'darkorange'])
for i, color in zip(range(n_classes), colors):
self.ax4.plot(fpr[i], tpr[i], color=color, lw=lw,
label='{0} (area = {1:0.2f})'
''.format(str_classes[i], roc_auc[i]))
self.ax4.plot([0, 1], [0, 1], 'k--', lw=lw)
self.ax4.set_xlim([0.0, 1.0])
self.ax4.set_ylim([0.0, 1.05])
self.ax4.set_xlabel('False Positive Rate')
self.ax4.set_ylabel('True Positive Rate')
self.ax4.set_title('ROC Curve by Class')
self.ax4.legend(loc="lower right")
# show the plot
self.fig4.tight_layout()
self.fig4.canvas.draw_idle()
#::-----------------------------------------------------
# Other Models Comparison
#::-----------------------------------------------------
filename2 = 'lr_finalized_model.sav'
self.other_clf_lr = pickle.load(open(filename2, 'rb'))
y_pred_lr = self.other_clf_lr.predict(X_test)
self.accuracy_lr = accuracy_score(y_test, y_pred_lr) * 100
self.txtAccuracy_lr.setText(str(self.accuracy_lr))
filename3 = 'rf_finalized_model.sav'
self.other_clf_rf = pickle.load(open(filename3, 'rb'))
y_pred_rf = self.other_clf_rf.predict(X_test)
self.accuracy_rf = accuracy_score(y_test, y_pred_rf) * 100
self.txtAccuracy_rf.setText(str(self.accuracy_rf))
filename4 = 'dt_finalized_model.sav'
self.other_clf_dt = pickle.load(open(filename4, 'rb'))
y_pred_dt = self.other_clf_dt.predict(X_test)
self.accuracy_dt = accuracy_score(y_test, y_pred_dt) * 100
self.txtAccuracy_dt.setText(str(self.accuracy_dt))
class TargetDistribution(QMainWindow):
#::---------------------------------------------------------
# This class creates a canvas with a plot to show the distribution
# of each feature in the dataset, optionally filtered by the target variable
# methods
# _init_
# update
#::---------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
#::--------------------------------------------------------
# Create a canvas with the layout to draw a histogram
# The layout sets all the elements and manages the changes
# made on the canvas
#::--------------------------------------------------------
super(TargetDistribution, self).__init__()
self.Title = "EDA: Variable Distribution"
self.main_widget = QWidget(self)
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
self.axes = [self.ax]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.dropdown1 = QComboBox()
self.featuresList = numerical.copy()
self.dropdown1.addItems(self.featuresList)
self.dropdown1.currentIndexChanged.connect(self.update)
self.label = QLabel("A plot:")
self.layout = QGridLayout(self.main_widget)
self.layout.addWidget(QLabel("Select Features:"), 0, 0, 1, 1)
self.layout.addWidget(self.dropdown1, 0, 1, 1, 1)
self.filter_data = QWidget(self)
self.filter_data.layout = QGridLayout(self.filter_data)
self.filter_data.layout.addWidget(QLabel("Choose Data Filter:"), 0, 0, 1, 1)
self.filter_radio_button = QRadioButton("All Data")
self.filter_radio_button.setChecked(True)
self.filter_radio_button.filter = "All_Data"
self.set_Filter = "All_Data"
self.filter_radio_button.toggled.connect(self.onFilterClicked)
self.filter_data.layout.addWidget(self.filter_radio_button, 0, 1, 1, 1)
self.filter_radio_button = QRadioButton("Loan Default: Yes")
self.filter_radio_button.filter = 1
self.filter_radio_button.toggled.connect(self.onFilterClicked)
self.filter_data.layout.addWidget(self.filter_radio_button, 0, 2, 1, 1)
self.filter_radio_button = QRadioButton("Loan Default: No")
self.filter_radio_button.filter = 0
self.filter_radio_button.toggled.connect(self.onFilterClicked)
self.filter_data.layout.addWidget(self.filter_radio_button, 0, 3, 1, 1)
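# Note: the same attribute name is reused for all three radio buttons above;
# onFilterClicked reads self.sender(), so only the toggled button matters.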
self.btnCreateGraph = QPushButton("Show Distribution")
self.btnCreateGraph.clicked.connect(self.update)
self.groupBox1 = QGroupBox('Distribution')
self.groupBox1Layout = QVBoxLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.groupBox1Layout.addWidget(self.canvas)
self.layout.addWidget(self.filter_data, 1, 0, 2, 2)
self.layout.addWidget(self.btnCreateGraph, 0, 3, 2, 2)
self.layout.addWidget(self.groupBox1, 3, 0, 5, 5)
self.setCentralWidget(self.main_widget)
self.resize(1200, 700)
self.show()
def onFilterClicked(self):
self.filter_radio_button = self.sender()
if self.filter_radio_button.isChecked():
self.set_Filter = self.filter_radio_button.filter
self.update()
def update(self):
#::--------------------------------------------------------
# This method executes each time a change is made on the canvas
# containing the elements of the graph
# The purpose of the method is to draw a histogram of the
# chosen feature, filtered by the selected loan default value
#::--------------------------------------------------------
colors = ["b", "r", "g", "y", "k", "c"]
self.ax.clear()
cat1 = self.dropdown1.currentText()
if (self.set_Filter == 1 or self.set_Filter == 0):
self.filtered_data = df_orig.copy()
self.filtered_data = self.filtered_data[self.filtered_data["loan_default"] == self.set_Filter]
else:
self.filtered_data = df_orig.copy()
self.ax.hist(self.filtered_data[cat1], bins=50, facecolor='blue', alpha=0.5)
self.ax.set_title(cat1)
self.ax.set_xlabel(cat1)
self.ax.set_ylabel("Count")
self.ax.grid(True)
self.fig.tight_layout()
self.fig.canvas.draw_idle()
del cat1
del self.filtered_data
class TargetCount(QMainWindow):
#::---------------------------------------------------------
# This class creates a canvas with a grouped bar plot showing the count
# of each categorical feature level split by the target variable
# methods
# _init_
# update
#::---------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
#::--------------------------------------------------------
# Create a canvas with the layout to draw a grouped bar chart
# The layout sets all the elements and manages the changes
# made on the canvas
#::--------------------------------------------------------
super(TargetCount, self).__init__()
self.Title = "EDA: Variable Distribution"
self.main_widget = QWidget(self)
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
self.axes = [self.ax]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.canvas.updateGeometry()
self.dropdown1 = QComboBox()
self.featuresList = categorical.copy()
self.dropdown1.addItems(self.featuresList)
self.dropdown1.currentIndexChanged.connect(self.update)
self.label = QLabel("A plot:")
self.layout = QGridLayout(self.main_widget)
self.layout.addWidget(QLabel("Select Features:"))
self.layout.addWidget(self.dropdown1)
self.layout.addWidget(self.canvas)
self.setCentralWidget(self.main_widget)
self.resize(1200, 700)
self.show()
# def get_bar_dict(self, cat1, level_list):
# count_yes = []
# count_no = []
# for level in level_list:
# count_no.append(len(df_orig[(df_orig[cat1] == level) & (df_orig[target] == 0)]))
# count_yes.append(len(df_orig[(df_orig[cat1] == level) & (df_orig[target] == 1)]))
# return count_no, count_yes
def update(self):
#::--------------------------------------------------------
# This method executes each time a change is made on the canvas
# containing the elements of the graph
# The purpose of the method is to draw, for the chosen categorical
# feature, the count of each level split by the loan default target
#::--------------------------------------------------------
colors = ["b", "r", "g", "y", "k", "c"]
self.ax.clear()
cat1 = self.dropdown1.currentText()
df_pick = df_orig[cat1]
level_list = list(df_pick.unique())
count_yes = []
count_no = []
for level in level_list:
count_no.append(len(df_orig[(df_orig[cat1] == level) & (df_orig[target] == 0)]))
count_yes.append(len(df_orig[(df_orig[cat1] == level) & (df_orig[target] == 1)]))
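# Draw the Default:No and Default:Yes counts as two adjacent bars per category
# level, offset by +/- onset around each tick position.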
all_width = 0.7
width = all_width / 2
onset = width / 2
x1, x2 = [x - onset for x in range(len(level_list))], [x + onset for x in range(len(level_list))]
self.ax.bar(x1, count_no, align='edge', width=width, label='Default:No')
self.ax.bar(x2, count_yes, align='edge', width=width, label='Default: Yes')
self.ax.set_xticks(range(len(level_list)))
self.ax.set_xticklabels(level_list)
self.ax.legend()
self.ax.set_title(cat1)
self.ax.set_xlabel(cat1)
self.ax.set_ylabel("Count")
self.ax.grid(True)
self.fig.tight_layout()
self.fig.canvas.draw_idle()
del cat1
class CorrelationPlot(QMainWindow):
#;:-----------------------------------------------------------------------
# This class creates a canvas to draw a correlation plot
# It presents the correlations among the selected features
# the methods for this class are:
# _init_
# initUi
# update
#::-----------------------------------------------------------------------
send_fig = pyqtSignal(str)
def __init__(self):
#::--------------------------------------------------------
# Initialize the values of the class
#::--------------------------------------------------------
super(CorrelationPlot, self).__init__()
self.Title = 'Correlation Plot'
self.initUi()
def initUi(self):
#::--------------------------------------------------------------
# Creates the canvas and elements of the canvas
#::--------------------------------------------------------------
self.setWindowTitle(self.Title)
self.setStyleSheet(font_size_window)
self.main_widget = QWidget(self)
self.layout = QVBoxLayout(self.main_widget)
self.groupBox1 = QGroupBox('Correlation Plot Features')
self.groupBox1Layout= QGridLayout()
self.groupBox1.setLayout(self.groupBox1Layout)
self.feature0 = QCheckBox(features_list[0], self)
self.feature1 = QCheckBox(features_list[1], self)
self.feature2 = QCheckBox(features_list[2], self)
self.feature3 = QCheckBox(features_list[3], self)
self.feature4 = QCheckBox(features_list[4], self)
self.feature5 = QCheckBox(features_list[5], self)
self.feature6 = QCheckBox(features_list[6], self)
self.feature7 = QCheckBox(features_list[7], self)
self.feature8 = QCheckBox(features_list[8], self)
self.feature9 = QCheckBox(features_list[9], self)
self.feature10 = QCheckBox(features_list[10], self)
self.feature11 = QCheckBox(features_list[11], self)
self.feature12 = QCheckBox(features_list[12], self)
self.feature13 = QCheckBox(features_list[13], self)
self.feature14 = QCheckBox(features_list[14], self)
self.feature15 = QCheckBox(features_list[15], self)
self.feature16 = QCheckBox(features_list[16], self)
self.feature17 = QCheckBox(features_list[17], self)
self.feature18 = QCheckBox(features_list[18], self)
self.feature19 = QCheckBox(features_list[19], self)
self.feature20 = QCheckBox(features_list[20], self)
self.feature21 = QCheckBox(features_list[21], self)
self.feature22 = QCheckBox(features_list[22], self)
self.feature23 = QCheckBox(features_list[23], self)
self.feature24 = QCheckBox(features_list[24], self)
self.feature0.setChecked(True)
self.feature1.setChecked(True)
self.feature2.setChecked(True)
self.feature3.setChecked(True)
self.feature4.setChecked(True)
self.feature5.setChecked(True)
self.feature6.setChecked(True)
self.feature7.setChecked(True)
self.feature8.setChecked(True)
self.feature9.setChecked(True)
self.feature10.setChecked(True)
self.feature11.setChecked(True)
self.feature12.setChecked(True)
self.feature13.setChecked(True)
self.feature14.setChecked(True)
self.feature15.setChecked(True)
self.feature16.setChecked(True)
self.feature17.setChecked(True)
self.feature18.setChecked(True)
self.feature19.setChecked(True)
self.feature20.setChecked(True)
self.feature21.setChecked(True)
self.feature22.setChecked(True)
self.feature23.setChecked(True)
self.feature24.setChecked(True)
self.btnExecute = QPushButton("Create Plot")
self.btnExecute.clicked.connect(self.update)
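# The button triggers update(), which redraws the plot using only the boxes
# that are currently checked.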
self.groupBox1Layout.addWidget(self.feature0, 0, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature1, 0, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature2, 0, 2, 1, 1)
self.groupBox1Layout.addWidget(self.feature3, 0, 3, 1, 1)
self.groupBox1Layout.addWidget(self.feature4, 0, 4, 1, 1)
self.groupBox1Layout.addWidget(self.feature5, 0, 5, 1, 1)
self.groupBox1Layout.addWidget(self.feature6, 0, 6, 1, 1)
self.groupBox1Layout.addWidget(self.feature7, 0, 7, 1, 1)
self.groupBox1Layout.addWidget(self.feature8, 1, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature9, 1, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature10, 1, 2, 1, 1)
self.groupBox1Layout.addWidget(self.feature11, 1, 3, 1, 1)
self.groupBox1Layout.addWidget(self.feature12, 1, 4, 1, 1)
self.groupBox1Layout.addWidget(self.feature13, 1, 5, 1, 1)
self.groupBox1Layout.addWidget(self.feature14, 1, 6, 1, 1)
self.groupBox1Layout.addWidget(self.feature15, 1, 7, 1, 1)
self.groupBox1Layout.addWidget(self.feature16, 2, 0, 1, 1)
self.groupBox1Layout.addWidget(self.feature17, 2, 1, 1, 1)
self.groupBox1Layout.addWidget(self.feature18, 2, 2, 1, 1)
self.groupBox1Layout.addWidget(self.feature19, 2, 3, 1, 1)
self.groupBox1Layout.addWidget(self.feature20, 2, 4, 1, 1)
self.groupBox1Layout.addWidget(self.feature21, 2, 5, 1, 1)
self.groupBox1Layout.addWidget(self.feature22, 2, 6, 1, 1)
self.groupBox1Layout.addWidget(self.feature23, 2, 7, 1, 1)
self.groupBox1Layout.addWidget(self.feature24, 3, 0, 1, 1)
self.groupBox1Layout.addWidget(self.btnExecute,5,3,1,1)
self.fig = Figure()
self.ax1 = self.fig.add_subplot(111)
self.axes=[self.ax1]
self.canvas = FigureCanvas(self.fig)
self.canvas.setSizePolicy(QSizePolicy.Expanding,
QSizePolicy.Expanding)
self.canvas.updateGeometry()
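# FigureCanvas bridges Matplotlib and Qt: the Figure created above is rendered
# into this widget, which is placed inside the 'Correlation Plot' group box below.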
self.groupBox2 = QGroupBox('Correlation Plot')
self.groupBox2Layout= QVBoxLayout()
self.groupBox2.setLayout(self.groupBox2Layout)
self.groupBox2Layout.addWidget(self.canvas)
self.layout.addWidget(self.groupBox1)
self.layout.addWidget(self.groupBox2)
self.setCentralWidget(self.main_widget)
self.resize(1500, 1400)
self.show()
self.update()
def update(self):
#::------------------------------------------------------------
# Populates the elements in the canvas using the values
# chosen as parameters for the correlation plot
#::------------------------------------------------------------
self.ax1.clear()
self.list_corr_features = pd.DataFrame([])
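# Each checked box appends its column to list_corr_features via pd.concat; the
# resulting frame feeds the correlation plot. A more compact equivalent (sketch
# only; it assumes the boxes were stored in a hypothetical list `checkboxes`,
# whereas here they are individual attributes) would be:
#     cols = [f for f, cb in zip(features_list, checkboxes) if cb.isChecked()]
#     self.list_corr_features = df[cols]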
if self.feature0.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[0]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[0]]], axis=1)
if self.feature1.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[1]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[1]]], axis=1)
if self.feature2.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[2]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[2]]], axis=1)
if self.feature3.isChecked():
if len(self.list_corr_features) == 0:
self.list_corr_features = df[features_list[3]]
else:
self.list_corr_features = pd.concat([self.list_corr_features, df[features_list[3]]], axis=1)