from pandas import DataFrame
from requests.models import HTTPError
import pandas as pd
import tmdbsimple as tmdb
import json
flatten = lambda l: [item for sublist in l for item in sublist]
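# flatten collapses a list of lists into a single list, e.g. flatten([[1, 2], [3]]) -> [1, 2, 3]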
def create_actors_dataframe(credits_df, save_path=None, actor_id=None):
"""Create the dataframe of actors present in the tmdb dataset.
Parameters
----------
credits_df : pandas.DataFrame
dataframe from the file tmdb_5000_credits.csv
save_path : str or None
Save the dataframe to the given path if not None
actor_id : int or None
If given, resume fetching from this actor id (useful to restart an interrupted run)
Returns
-------
list
List of the processed actor ids; the actors DataFrame itself is written to save_path when a path is given
"""
columns_to_drop = ['also_known_as']
actors = flatten([json.loads(item) for index, item in credits_df.cast.iteritems()])
if actor_id is not None:
list_of_id = list(set([actor['id'] for actor in actors]))
recover_index = list_of_id.index(actor_id)
list_of_id = list_of_id[recover_index:]
else:
list_of_id = set([actor['id'] for actor in actors])
actors.clear()
for state, id in enumerate(list_of_id):
try:
actor = tmdb.People(id).info()
except HTTPError:
print(f'id {id} not found')
else:
actors.append(actor)
if save_path is not None and state % 500 == 0:
actors_df = pd.DataFrame(actors).set_index('id').drop(columns_to_drop, axis=1)
actors_df.to_csv(save_path)
actors_df = pd.DataFrame(actors).set_index('id').drop(columns_to_drop, axis=1)
if save_path is not None:
actors_df.to_csv(save_path)
return list_of_id
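# Example usage (a minimal sketch; the API key and file names below are placeholders, not part of this module):
# tmdb.API_KEY = 'YOUR_TMDB_API_KEY'
# credits_df = pd.read_csv('tmdb_5000_credits.csv')
# processed_ids = create_actors_dataframe(credits_df, save_path='actors.csv')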
def clean_movies_dataframe(movies_df: pd.DataFrame, save_path=None)-> pd.DataFrame:
"""
Create a dataset containing all information related to a movie (budget, income, popularity...)
Parameters
----------
movies_df : pandas.DataFrame
dataframe from the file tmdb_5000_movies.csv
save_path : str or None
Save the dataframe to the given path if not None
Returns
-------
pandas.DataFrame
DataFrame which contains information about movies in the tmdb dataset
"""
df = movies_df.copy()
for col in ['keywords', 'genres', 'spoken_languages']:
df[col] = df[col].map(lambda values: '-'.join([value['name'] for value in json.loads(values)]))
df['release_date'] = pd.to_datetime(df['release_date'], format='%Y-%m-%d', errors='ignore')
df = df.drop(['production_companies', 'production_countries', 'homepage', 'overview', 'tagline'], axis=1)
df = df.reset_index(drop=True)
if save_path is not None:
df.to_csv(save_path, index=False)
return df
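# Example usage (a minimal sketch; the file names are placeholders):
# movies_df = pd.read_csv('tmdb_5000_movies.csv')
# movies_clean = clean_movies_dataframe(movies_df, save_path='movies_clean.csv')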
def create_genders_table(movies_df: pd.DataFrame, save_path: object = None) -> pd.DataFrame:
"""
Create a table (DataFrame) that maps each genre ('gender' in this module) id to its name.
Parameters
----------
movies_df : pandas.DataFrame
save_path : str or None
Save the dataframe to the given path if not None
Returns
-------
pandas.DataFrame
DataFrame mapping each genre ('gender') id to its name
"""
genders_list = flatten([json.loads(item) for index, item in movies_df.genres.iteritems()])
genders_set = set([(g['id'], g['name']) for g in genders_list])
genders_table = pd.DataFrame(genders_set, columns=['id', 'name'])
if save_path is not None:
genders_table.to_csv(save_path, index=False)
return genders_table
def create_genders_movies(movies_df:pd.DataFrame, save_path: object = None) -> pd.DataFrame:
"""
Create table (DataFrame) which links gender id to movie id
Parameters
----------
movies_df : pandas.DataFrame
dataframe from the file tmdb_5000_movies.csv
save_path : str or None
Save the dataframe to the given path if not None
Returns
-------
pandas.DataFrame
DataFrame which contains gender_id and movie_id
"""
movies_genders = pd.DataFrame({'movie_id': [], 'gender_id': []})
""" MCH API ver 0.1
Author: <NAME>
License: CC-BY-SA 4.0
2020 Mexico
"""
import os
from flask import Flask, jsonify, json, Response
from flask_restful import Api, Resource, reqparse, abort
from flask_mysqldb import MySQL
import pandas as pd
import numpy as np
import json
from os.path import abspath, dirname, join
app = Flask(__name__)
# Mysql connection
app.config['MYSQL_HOST'] = os.getenv('MCH_DB_HOST')
app.config['MYSQL_USER'] = os.getenv('MCH_DB_USER')
app.config['MYSQL_PASSWORD'] = os.getenv('MCH_DB_PASSWORD')
app.config['MYSQL_DB'] = os.getenv('MCH_DB_NAME')
app.config['MYSQL_PORT'] = int(os.getenv('MCH_DB_PORT'))
app.config['SECRET_KEY'] = os.getenv("APP_SECRET")
mysql = MySQL(app)
api = Api(app)
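# Note: the environment variables read above (MCH_DB_HOST, MCH_DB_USER, MCH_DB_PASSWORD,
# MCH_DB_NAME, MCH_DB_PORT, APP_SECRET) must be set before this module is imported;
# otherwise int(os.getenv('MCH_DB_PORT')) raises a TypeError. Their values are deployment-specific.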
# dataframe for stations table
stnmdata = pd.DataFrame()
# read MCH language definition from mch.dbn
filemch = open('mch.dbn', 'r')
filemch.readline() # odbc connector
filemch.readline() # mysql5
filemch.readline() # interface language
mchlang = filemch.readline() # database language
# read fields and tables names definition file
deftbfl = pd.read_csv('MCHtablasycampos.def', sep = "\t", names = ['sec','type', 'id_sec', 'esp', 'eng', 'fra', '4', 'comment'], encoding='utf_8')
# new dataframe for the specific language
ltbfl = pd.DataFrame()
# look up the specific fields and tables for the selected language
if int(mchlang) == 1:
ltbfl = deftbfl[['id_sec','esp']]
ltbfl.set_index('id_sec')
if int(mchlang) == 2:
ltbfl = deftbfl[['id_sec','eng']]
ltbfl.set_index('id_sec')
if int(mchlang) == 3:
ltbfl = deftbfl[['id_sec','fra']]
ltbfl.set_index('id_sec')
def deg_to_dms(deg):
d = int(deg)
md = abs(deg - d) * 60
m = int(md)
sd = (md - m) * 60
return [d, m, sd]
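# e.g. deg_to_dms(-99.1332) -> [-99, 7, ~59.52] (degrees, minutes, decimal seconds)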
class stations(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
strqry='select * from ' +stntable.iloc[0,1] +' order by ' +stnfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH(2)','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stations, "/API/stations")
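# Example request (a minimal sketch; host and port assume a local Flask run with the default port 5000):
# import requests
# stations = requests.get('http://localhost:5000/API/stations').json()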
qry_station_req_arg = reqparse.RequestParser()
pars = qry_station_req_arg.add_argument("stn_id",type=str,help="Station ID",required=True)
class qry_station(Resource):
def get(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='select * from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
qrystation = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=qrystation,columns=['Station','StationName','StationName2','TimeZone','Longitude','Latitude','Altitude','Longitude2','Latitude2','DMSlongitude','DMSLatitude','Statee','RegManagmt','Catchment','Subcatchment',
'OperatnlRegion','HydroReg','RH','Municipality','CodeB','CodeG','CodeCB','CodePB','CodeE','CodeCL','CodeHG','CodePG','CodeNw','Code1','Code2','Code3','MaxOrdStrgLvl','MaxOrdStrgVol',
'MaxExtStrgLvl','MaxExtStrgVol','SpillwayLevel','SpillwayStorage','FreeSpillwayLevel','FreeSpillwayStorage','DeadStrgLevel','DeadStrgCapac','UsableStorageCapLev','UsableStorage','HoldingStorage',
'Key1fil','Key2fil','Key3fil','CritLevelSta','MinLevelSta','MaxLevelSta','CritFlow','MinDischarge','MaxDischarge','Stream','Distance','Infrastructure','Type','Usee'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Station not found...")
#abort_if_stn_not_exist("stn_id")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('stn_name')
parser.add_argument('stn_name2')
parser.add_argument('t_zone')
parser.add_argument('long')
parser.add_argument('lat')
parser.add_argument('alt')
parser.add_argument('state_id')
parser.add_argument('reg_m')
parser.add_argument('catchm')
parser.add_argument('s_cat')
parser.add_argument('o_reg')
parser.add_argument('hydro_r')
parser.add_argument('rh')
parser.add_argument('mun_id')
parser.add_argument('mosl')
parser.add_argument('mosv')
parser.add_argument('mesl')
parser.add_argument('mesv')
parser.add_argument('s_level')
parser.add_argument('s_stor')
parser.add_argument('fs_level')
parser.add_argument('fs_stor')
parser.add_argument('ds_level')
parser.add_argument('ds_cap')
parser.add_argument('us_capl')
parser.add_argument('ustor')
parser.add_argument('hstor')
parser.add_argument('crl_s')
parser.add_argument('mnl_s')
parser.add_argument('mxl_s')
parser.add_argument('cr_f')
parser.add_argument('mn_dis')
parser.add_argument('mx_dis')
parser.add_argument('stream')
parser.add_argument('dist')
parser.add_argument('infr')
parser.add_argument('type')
parser.add_argument('use')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
stn_id = args.get('stn_id')
stn_name = args.get('stn_name')
stn_name2 = args.get('stn_name2')
t_zone = args.get('t_zone')
long2 = args.get('long')
lat2 = args.get('lat')
alt = args.get('alt')
state_id = args.get('state_id')
reg_m = args.get('reg_m')
catchm = args.get('catchm')
s_cat = args.get('s_cat')
o_reg = args.get('o_reg')
hydro_r = args.get('hydro_r')
rh = args.get('rh')
mun_id = args.get('mun_id')
mosl = args.get('mosl')
mosv = args.get('mosv')
mesl = args.get('mesl')
mesv = args.get('mesv')
s_level = args.get('s_level')
s_stor = args.get('s_stor')
fs_level = args.get('fs_level')
fs_stor = args.get('fs_stor')
ds_level = args.get('ds_level')
ds_cap = args.get('ds_cap')
us_capl = args.get('us_capl')
ustor = args.get('ustor')
hstor = args.get('hstor')
crl_s = args.get('crl_s')
mnl_s = args.get('mnl_s')
mxl_s = args.get('mxl_s')
cr_f = args.get('cr_f')
mn_dis = args.get('mn_dis')
mx_dis = args.get('mx_dis')
stream = args.get('stream')
dist = args.get('dist')
infr = args.get('infr')
typee = args.get('type')
usee = args.get('use')
# check whether the input comes from a file
if jfile in (None, ''):
Latitude=deg_to_dms(float(lat2))
Longitude=deg_to_dms(float(long2))
slong2=str(Longitude[0])+'°'+str(Longitude[1]) +'´' +str(Longitude[2])
slat2=str(Latitude[0])+'°'+str(Latitude[1]) +'´' +str(Latitude[2])
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(stn_id) +'","' +str(stn_name) +'","' +str(stn_name2) +'","' +str(t_zone) +'","' + str(long2)
+ '","' +str(lat2) +'","' +str(alt) +'","' +str(long2) +'","' +str(lat2) +'","' +slong2 +'","' +slat2 +'","' +str(state_id) +'","' +str(reg_m)
+ '","' +str(catchm) +'","' +str(s_cat) +'","' +str(o_reg) +'","' +str(hydro_r) +'","' +str(rh) +'","' +str(mun_id) +'","","","","","","","","","","","","","' + str(mosl)
+ '","' +str(mosv) +'","' +str(mesl) +'","' +str(mesv) +'","' +str(s_level) +'","' +str(s_stor) +'","' +str(fs_level) +'","' + str(fs_stor)
+ '","' +str(ds_level) +'","' +str(ds_cap) +'","' +str(us_capl) +'","' +str(ustor) +'","' +str(hstor) +'","","","","' +str(crl_s) +'","' + str(mnl_s)
+ '","' +str(mxl_s) +'","' +str(cr_f) +'","' +str(mn_dis) +'","' +str(mx_dis) +'","' +str(stream) +'","' +str(dist) +'","' +str(infr) +'","' + str(typee)
+ '","' +str(usee) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'","' +data.iloc[int(n),3] +'","' + data.iloc[int(n),4]
+ '","' +data.iloc[int(n),5] +'","' +str(data.iloc[int(n),6]) +'","' +str(data.iloc[int(n),7]) +'","' +str(data.iloc[int(n),8]) +'","' +data.iloc[int(n),9] +'","' +data.iloc[int(n),10] +'","' +data.iloc[int(n),11]
+ '","' +data.iloc[int(n),12] + '","' +data.iloc[int(n),13] +'","' +data.iloc[int(n),14] +'","' +data.iloc[int(n),15] +'","' +data.iloc[int(n),16] +'","' +data.iloc[int(n),17] +'","' +data.iloc[int(n),18]
+ '","' +data.iloc[int(n),19] +'","' +data.iloc[int(n),20] +'","' +data.iloc[int(n),21] +'","' +data.iloc[int(n),22] +'","' +data.iloc[int(n),23] +'","' +data.iloc[int(n),24] +'","' +data.iloc[int(n),25]
+ '","' +data.iloc[int(n),26] + '","' +data.iloc[int(n),27] +'","' +data.iloc[int(n),28] +'","' +data.iloc[int(n),29] +'","' +data.iloc[int(n),30] +'","' +data.iloc[int(n),31]
+ '","' +data.iloc[int(n),32] +'","' +data.iloc[int(n),33] +'","' +data.iloc[int(n),34] +'","' +data.iloc[int(n),35] +'","' +data.iloc[int(n),36] +'","' +data.iloc[int(n),37] +'","' + data.iloc[int(n),38]
+ '","' +data.iloc[int(n),39] +'","' +data.iloc[int(n),40] +'","' +data.iloc[int(n),41] +'","' +data.iloc[int(n),42] +'","' +data.iloc[int(n),43] +'","' +data.iloc[int(n),44] +'","' +data.iloc[int(n),45]
+ '","' +data.iloc[int(n),46] +'","' +data.iloc[int(n),47] +'","' + data.iloc[int(n),48] +'","' +data.iloc[int(n),49] +'","' +data.iloc[int(n),50] +'","' +data.iloc[int(n),51] +'","' +data.iloc[int(n),52]
+ '","' +data.iloc[int(n),53] +'","' +data.iloc[int(n),54] +'","' +data.iloc[int(n),55] +'","' +data.iloc[int(n),56] +'","' +data.iloc[int(n),57] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'","' +data.iloc[0,3] +'","' + data.iloc[0,4]
+ '","' +data.iloc[0,5] +'","' +str(data.iloc[0,6]) +'","' +str(data.iloc[0,7]) +'","' +str(data.iloc[0,8]) +'","' +data.iloc[0,9] +'","' +data.iloc[0,10] +'","' +data.iloc[0,11]
+ '","' +data.iloc[0,12] + '","' +data.iloc[0,13] +'","' +data.iloc[0,14] +'","' +data.iloc[0,15] +'","' +data.iloc[0,16] +'","' +data.iloc[0,17] +'","' +data.iloc[0,18]
+ '","' +data.iloc[0,19] +'","' +data.iloc[0,20] +'","' +data.iloc[0,21] +'","' +data.iloc[0,22] +'","' +data.iloc[0,23] +'","' +data.iloc[0,24] +'","' +data.iloc[0,25]
+ '","' +data.iloc[0,26] + '","' +data.iloc[0,27] +'","' +data.iloc[0,28] +'","' +data.iloc[0,29] +'","' +data.iloc[0,30] +'","' +data.iloc[0,31]
+ '","' +data.iloc[0,32] +'","' +data.iloc[0,33] +'","' +data.iloc[0,34] +'","' +data.iloc[0,35] +'","' +data.iloc[0,36] +'","' +data.iloc[0,37] +'","' + data.iloc[0,38]
+ '","' +data.iloc[0,39] +'","' +data.iloc[0,40] +'","' +data.iloc[0,41] +'","' +data.iloc[0,42] +'","' +data.iloc[0,43] +'","' +data.iloc[0,44] +'","' +data.iloc[0,45]
+ '","' +data.iloc[0,46] +'","' +data.iloc[0,47] +'","' + data.iloc[0,48] +'","' +data.iloc[0,49] +'","' +data.iloc[0,50] +'","' +data.iloc[0,51] +'","' +data.iloc[0,52]
+ '","' +data.iloc[0,53] +'","' +data.iloc[0,54] +'","' +data.iloc[0,55] +'","' +data.iloc[0,56] +'","' +data.iloc[0,57] +'")')
qry.execute(strqry)
return 'Station stored',201
def delete(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
args = parser.parse_args()
stn_id = args.get('stn_id')
strqry='delete from ' +stntable.iloc[0,1] +' where ' +stnfield.iloc[0,1] +'="'+ stn_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Station deleted',204
api.add_resource(qry_station, "/API/stations/qry_station")
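# Example requests (a minimal sketch; host, port and the station id are placeholders):
# import requests
# station = requests.get('http://localhost:5000/API/stations/qry_station', params={'stn_id': '12345'}).json()
# requests.delete('http://localhost:5000/API/stations/qry_station', params={'stn_id': '12345'})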
class stngroups(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(stngroups, "/API/stngroups")
class qry_stngroup(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Stngroup','Secuen','Station'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Stationgroup not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('file')
args = parser.parse_args()
jfile = args.get('file')
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
tdata=len(data.index)
rows=list(range(0,tdata))
for n in rows:
strqry = ('insert ignore into ' +ntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
return 'Stationgroup stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntGruposestac']
nfield = ltbfl[ltbfl['id_sec'] == 'ncGrupoEstac']
parser = reqparse.RequestParser()
parser.add_argument('stngp_id')
args = parser.parse_args()
stngp_id = args.get('stngp_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ stngp_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Stationgroup deleted',204
api.add_resource(qry_stngroup, "/API/stngroups/qry_stngroup")
class variables(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
strqry='select distinct(' +nfield.iloc[0,1] +') from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(variables, "/API/variables")
class qry_variable(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
nfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
parser = reqparse.RequestParser()
parser.add_argument('var_id')
args = parser.parse_args()
var_id = args.get('var_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Variable','VariabAbbrev','VariabDescrn','TableName','Unit','TypeDDorDE','CumulType','NbrDecimal','CalcbyGrp','CalcDTaD'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Variable not found...")
return parsed
api.add_resource(qry_variable, "/API/variables/qry_variable")
class states(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(states, "/API/states")
class qry_state(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
state_id = args.get('state_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Statee','State2','Statename'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="State not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('state_id')
parser.add_argument('state_2')
parser.add_argument('state_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
state_id = args.get('state_id')
state_2 = args.get('state_2')
state_name = args.get('state_name')
# check whether the input comes from a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(state_id) +'","' +str(state_2) +'","' +str(state_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'State stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntEstados']
nfield = ltbfl[ltbfl['id_sec'] == 'ncEstado']
parser = reqparse.RequestParser()
parser.add_argument('state_id')
args = parser.parse_args()
state_id = args.get('state_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ state_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'State deleted',204
api.add_resource(qry_state, "/API/states/qry_state")
class municipalities(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(municipalities, "/API/municipalities")
class qry_municipality(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('mun_id')
args = parser.parse_args()
mun_id = args.get('mun_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ mun_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Municipality','Municipality2','MunicipalityName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Municipality not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('mun_id')
parser.add_argument('mun_2')
parser.add_argument('mun_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
mun_id = args.get('mun_id')
mun_2 = args.get('mun_2')
mun_name = args.get('mun_name')
# check whether the input comes from a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(mun_id) +'","' +str(mun_2) +'","' +str(mun_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Municipality stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntMunicipios']
nfield = ltbfl[ltbfl['id_sec'] == 'ncMunicipio']
parser = reqparse.RequestParser()
parser.add_argument('mun_id')
args = parser.parse_args()
mun_id = args.get('mun_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ mun_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Municipality deleted',204
api.add_resource(qry_municipality, "/API/municipalities/qry_municipality")
class hydroregions(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
nfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Hydroreg','Hydroreg2','HydrRegionName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(hydroregions, "/API/hydroregions")
class qry_hydroregion(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
nfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
parser = reqparse.RequestParser()
parser.add_argument('hr_id')
args = parser.parse_args()
hr_id = args.get('hr_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ hr_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Hydroreg','Hydroreg2','HydrRegionName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Hydro Region not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('hr_id')
parser.add_argument('hr_2')
parser.add_argument('hr_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
hr_id = args.get('hr_id')
hr_2 = args.get('hr_2')
hr_name = args.get('hr_name')
# check whether the input comes from a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(hr_id) +'","' +str(hr_2) +'","' +str(hr_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Hydrological Region stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntRegionhidr']
nfield = ltbfl[ltbfl['id_sec'] == 'ncReghidr']
parser = reqparse.RequestParser()
parser.add_argument('hr_id')
args = parser.parse_args()
hr_id = args.get('hr_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ hr_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Hydrological Region deleted',204
api.add_resource(qry_hydroregion, "/API/hydroregions/qry_hydroregion")
class catchments(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Catchment','Catchment2','CatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(catchments, "/API/catchments")
class qry_catchment(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
parser = reqparse.RequestParser()
parser.add_argument('cat_id')
args = parser.parse_args()
cat_id = args.get('cat_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ cat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Catchment','Catchment2','CatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Catchment not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('cat_id')
parser.add_argument('cat_2')
parser.add_argument('cat_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
cat_id = args.get('cat_id')
cat_2 = args.get('cat_2')
cat_name = args.get('cat_name')
# check whether the input comes from a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(cat_id) +'","' +str(cat_2) +'","' +str(cat_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Catchment stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntCuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncCuenca']
parser = reqparse.RequestParser()
parser.add_argument('cat_id')
args = parser.parse_args()
cat_id = args.get('cat_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ cat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Catchment deleted',204
api.add_resource(qry_catchment, "/API/catchments/qry_catchment")
class subcatchments(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Subcatchment','Subcatchment2','SubCatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(subcatchments, "/API/subcatchments")
class qry_subcatchment(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
parser = reqparse.RequestParser()
parser.add_argument('scat_id')
args = parser.parse_args()
scat_id = args.get('scat_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ scat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Subcatchment','Subcatchment2','SubCatchmentName'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Subcatchment not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('scat_id')
parser.add_argument('scat_2')
parser.add_argument('scat_name')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
scat_id = args.get('scat_id')
scat_2 = args.get('scat_2')
scat_name = args.get('scat_name')
# check whether the input comes from a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(scat_id) +'","' +str(scat_2) +'","' +str(scat_name) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'","' +data.iloc[int(n),2] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'","' +data.iloc[0,2] +'")')
qry.execute(strqry)
return 'Subcatchment stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntSubcuencas']
nfield = ltbfl[ltbfl['id_sec'] == 'ncSubcuenca']
parser = reqparse.RequestParser()
parser.add_argument('scat_id')
args = parser.parse_args()
scat_id = args.get('scat_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ scat_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Subcatchment deleted',204
api.add_resource(qry_subcatchment, "/API/subcatchments/qry_subcatchment")
class units(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
nfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
strqry='select * from ' +ntable.iloc[0,1] +' order by ' +nfield.iloc[0,1]
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
stnmdata = pd.DataFrame(data=dataqry,columns=['Unit','UnitDescription'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
return parsed
api.add_resource(units, "/API/units")
class qry_unit(Resource):
def get(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
nfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
parser = reqparse.RequestParser()
parser.add_argument('unit_id')
args = parser.parse_args()
unit_id = args.get('unit_id')
strqry='select * from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ unit_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
stnmdata = pd.DataFrame(data=dataqry,columns=['Unit','UnitDescription'])
jsondata = stnmdata.to_json(orient="records")
parsed = json.loads(jsondata)
else:
abort(404, message="Unit not found...")
return parsed
def post(self):
qry = mysql.connection.cursor()
stntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('unit_id')
parser.add_argument('unit_desc')
args = parser.parse_args()
# retrieve parameters
jfile = args.get('file')
unit_id = args.get('unit_id')
unit_desc = args.get('unit_desc')
# check whether the input comes from a file
if jfile in (None, ''):
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +str(unit_id) +'","' +str(unit_desc) +'")')
qry.execute(strqry)
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
fields = data.columns.tolist()
tdata=len(data.index)
rows=list(range(0,tdata))
if int(tdata) > 1:
for n in rows:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[int(n),0] +'","' +data.iloc[int(n),1] +'")')
qry.execute(strqry)
else:
strqry = ('insert ignore into ' +stntable.iloc[0,1] +' values("' +data.iloc[0,0] +'","' +data.iloc[0,1] +'")')
qry.execute(strqry)
return 'Unit stored',201
def delete(self):
qry = mysql.connection.cursor()
ntable = ltbfl[ltbfl['id_sec'] == 'ntUnidades']
nfield = ltbfl[ltbfl['id_sec'] == 'ncUnidad']
parser = reqparse.RequestParser()
parser.add_argument('unit_id')
args = parser.parse_args()
unit_id = args.get('unit_id')
strqry='delete from ' +ntable.iloc[0,1] +' where ' +nfield.iloc[0,1] +'="'+ unit_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
return 'Unit deleted',204
api.add_resource(qry_unit, "/API/units/qry_unit")
class dailydata(Resource):
def get(self):
qry = mysql.connection.cursor()
vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
vtfield = ltbfl[ltbfl['id_sec'] == 'ncTipoDDoDE']
vnfield = ltbfl[ltbfl['id_sec'] == 'ncNombreTabla']
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
valuefield = ltbfl[ltbfl['id_sec'] == 'ncValor']
maxvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMax']
minvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMin']
maxdatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMax']
mindatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMin']
parser = reqparse.RequestParser()
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('date_ini')
parser.add_argument('date_end')
parser.add_argument('datee')
args = parser.parse_args()
stn_id = args.get('stn_id')
var_id = args.get('var_id')
date_ini = args.get('date_ini')
date_end = args.get('date_end')
datee = args.get('datee')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
extreme = False
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = variable.iloc[0,5] +variable.iloc[0,3]
if variable.iloc[0,5] == 'DE':
extreme = True
else:
abort(404, message="Variable not found...")
qry.close()
# check if it is a date or a period
if datee in (None, ''):
if extreme == True:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] + ',' +maxdatefield.iloc[0,1] + ',' +maxvalfield.iloc[0,1] + ',' +mindatefield.iloc[0,1] + ',' +minvalfield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'>="' + str(date_ini) +'" and ' + datefield.iloc[0,1] +'<="' + str(date_end) +'"'
else:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'>="' + str(date_ini) +'" and ' + datefield.iloc[0,1] +'<="' + str(date_end) +'"'
else:
if extreme == True:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] + ',' +maxdatefield.iloc[0,1] + ',' +maxvalfield.iloc[0,1] + ',' +mindatefield.iloc[0,1] + ',' +minvalfield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'="' + str(datee) +'"'
else:
strqry='select ' +stnfield.iloc[0,1] + ',' +datefield.iloc[0,1] + ',' +valuefield.iloc[0,1] +' from ' + tablename +' where ' + stnfield.iloc[0,1] +'="'+ str(stn_id) +'" and ' + datefield.iloc[0,1] +'="' + str(datee) +'"'
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
if rcount > 0:
if extreme == True:
ddata = pd.DataFrame(data=dataqry,columns=['Station','Date','Value','MaxValDate','MaxValue','MinValDate','MinValue'])
else:
ddata = pd.DataFrame(data=dataqry,columns=['Station','Date','Value'])
jsondata = ddata.to_json(orient="records",date_format='iso', date_unit='s')
parsed = json.loads(jsondata)
else:
abort(404, message="There is no data...")
return parsed
def post(self):
qry = mysql.connection.cursor()
vartable = ltbfl[ltbfl['id_sec'] == 'ntVariables']
varfield = ltbfl[ltbfl['id_sec'] == 'ncVariable']
vtfield = ltbfl[ltbfl['id_sec'] == 'ncTipoDDoDE']
vnfield = ltbfl[ltbfl['id_sec'] == 'ncNombreTabla']
stntable = ltbfl[ltbfl['id_sec'] == 'ntEstaciones']
stnfield = ltbfl[ltbfl['id_sec'] == 'ncEstacion']
datefield = ltbfl[ltbfl['id_sec'] == 'ncFecha']
valuefield = ltbfl[ltbfl['id_sec'] == 'ncValor']
maxvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMax']
minvalfield = ltbfl[ltbfl['id_sec'] == 'ncValorMin']
maxdatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMax']
mindatefield = ltbfl[ltbfl['id_sec'] == 'ncFechaHoraMin']
parser = reqparse.RequestParser()
parser.add_argument('file')
parser.add_argument('stn_id')
parser.add_argument('var_id')
parser.add_argument('datee')
parser.add_argument('value')
parser.add_argument('maxvaldate')
parser.add_argument('maxvalue')
parser.add_argument('minvaldate')
parser.add_argument('minvalue')
args = parser.parse_args()
jfile = args.get('file')
stn_id = args.get('stn_id')
var_id = args.get('var_id')
datee = args.get('datee')
value = args.get('value')
maxvaldate = args.get('maxvaldate')
maxvalue = args.get('maxvalue')
minvaldate = args.get('minvaldate')
minvalue = args.get('minvalue')
# query variable table for tablename
strqry='select * from ' +vartable.iloc[0,1] +' where ' +varfield.iloc[0,1] +'="'+ var_id +'"'
strqry=strqry.lower()
qry.execute(strqry)
datavar = qry.fetchall()
rcount=qry.rowcount
extreme = False
if rcount > 0:
variable = pd.DataFrame(data=datavar, dtype="string")
tablename = variable.iloc[0,5] +variable.iloc[0,3]
if (variable.iloc[0,5] == 'DE' or variable.iloc[0,5] == 'de'):
extreme = True
else:
abort(404, message="Variable not found...")
qry.close()
# verify if input is a file
if jfile in (None, ''):
# check if it is extreme data
if extreme == True:
strqry = ('insert ignore into ' +tablename +' values("' +str(stn_id) +'","' +str(datee) +'","' +str(value) +'","","' +str(maxvaldate) +'","' +str(maxvalue) +'","","' +str(minvaldate) +'","' +str(minvalue) +'","","0","0","0","API")')
else:
strqry = ('insert ignore into ' +tablename +' values("' +str(stn_id) +'","' +str(datee) +'","' +str(value) +'","","0","0","0","API")')
strqry=strqry.lower()
qry.execute(strqry)
dataqry = qry.fetchall()
rcount=qry.rowcount
qry.close()
else:
f=open(jfile,'r')
filej = f.read()
f.close()
jdata = json.loads(filej)
data = pd.DataFrame(jdata)
import unittest
import numpy as np
import pandas as pd
from haychecker.chc.metrics import constraint
class TestConstraint(unittest.TestCase):
def test_empty(self):
df = pd.DataFrame()
df["c1"] = []
df["c2"] = []
condition1 = {"column": "c1", "operator": "lt", "value": 1000}
condition2 = {"column": "c1", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0], [1], conditions, df)[0]
self.assertEqual(r, 100.)
def test_allnull(self):
df = pd.DataFrame()
df["c1"] = [None for _ in range(100)]
df["c2"] = [np.NaN for _ in range(100)]
df["c3"] = [None for _ in range(100)]
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 100.0)
def test_allnull_with_conditions(self):
df = pd.DataFrame()
df["c1"] = [None for _ in range(100)]
df["c2"] = [None for _ in range(100)]
df["c3"] = [np.NaN for _ in range(100)]
condition1 = {"column": "c1", "operator": "lt", "value": 1000}
condition2 = {"column": "c1", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0, 1], [2], conditions, df)[0]
self.assertEqual(r, 100.0)
def test_halfnull_halfequal_respected(self):
df = pd.DataFrame()
c1 = [chr(1) for _ in range(50)]
c2 = [2 for _ in range(50)]
c3 = [2 / 0.6 for _ in range(50)]
c1.extend([None for _ in range(50)])
c2.extend([np.NaN for _ in range(50)])
c3.extend([None for _ in range(50)])
df["c1"] = c1
df["c2"] = c2
df["c3"] = c3
r = constraint([0, 1], [2], df=df)[0]
self.assertEqual(r, 100.0)
condition1 = {"column": "c2", "operator": "lt", "value": 2}
condition2 = {"column": "c2", "operator": "gt", "value": 0}
conditions = [condition1, condition2]
r = constraint([0, 1], [2], conditions, df)[0]
self.assertEqual(r, 100.0)
r = constraint([0, 2], [1], conditions, df)[0]
self.assertEqual(r, 100.0)
def test_halfnull_halfequal_notrespected1(self):
df = pd.DataFrame()
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.function.evaluation import evaluate_ranking_algorithm
from brightics.function.recommendation.als import als_recommend
from brightics.common.datasets import load_iris
import unittest
import pandas as pd
import numpy as np
import HtmlTestRunner
import os
class EvaluateRankingAlgorithm(unittest.TestCase):
def setUp(self):
print("*** Evaluate Ranking Algorithm UnitTest Start ***")
data = dict()
np.random.seed(3) ; data['user'] = np.random.randint(10, size=100)
np.random.seed(10) ; data['item'] = np.random.randint(10, size=100)
np.random.seed(5) ; data['rating'] = np.random.randint(5, size=100)
self.testdata = data
def tearDown(self):
print("*** Evaluate Ranking Algorithm UnitTest End ***")
def test(self):
recommend_result = als_recommend(self.testdata, user_col='user', item_col='item', rating_col='rating', filter=False, seed=5)['out_table']
result = evaluate_ranking_algorithm(table1=pd.DataFrame(self.testdata)
import os
import pickle
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
import numpy as np
import pandas as pd
from scipy.special import softmax
from tqdm import tqdm
from utils import wln_loss, regio_acc, lr_multiply_ratio, parse_args
import timeit
args, dataloader, classifier = parse_args(cross_val=True)
reactivity_data = pd.read_csv(args.data_path, index_col=0)
"""
"""
"""
>>> # ---
>>> # SETUP
>>> # ---
>>> import os
>>> import logging
>>> logger = logging.getLogger('PT3S.Rm')
>>> # ---
>>> # path
>>> # ---
>>> if __name__ == "__main__":
... try:
... dummy=__file__
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ','path = os.path.dirname(__file__)'," ."))
... path = os.path.dirname(__file__)
... except NameError:
... logger.debug("{0:s}{1:s}{2:s}".format('DOCTEST: __main__ Context: ',"path = '.' because __file__ not defined and: "," from Rm import Rm"))
... path = '.'
... from Rm import Rm
... else:
... path = '.'
... logger.debug("{0:s}{1:s}".format('Not __main__ Context: ',"path = '.' ."))
>>> try:
... from PT3S import Mx
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Mx: ImportError: ","trying import Mx instead ... maybe pip install -e . is active ..."))
... import Mx
>>> try:
... from PT3S import Xm
... except ImportError:
... logger.debug("{0:s}{1:s}".format("DOCTEST: from PT3S import Xm: ImportError: ","trying import Xm instead ... maybe pip install -e . is active ..."))
... import Xm
>>> # ---
>>> # testDir
>>> # ---
>>> # globs={'testDir':'testdata'}
>>> try:
... dummy= testDir
... except NameError:
... testDir='testdata'
>>> # ---
>>> # dotResolution
>>> # ---
>>> # globs={'dotResolution':''}
>>> try:
... dummy= dotResolution
... except NameError:
... dotResolution=''
>>> import pandas as pd
>>> import matplotlib.pyplot as plt
>>> pd.set_option('display.max_columns',None)
>>> pd.set_option('display.width',666666666)
>>> # ---
>>> # LocalHeatingNetwork SETUP
>>> # ---
>>> xmlFile=os.path.join(os.path.join(path,testDir),'LocalHeatingNetwork.XML')
>>> xm=Xm.Xm(xmlFile=xmlFile)
>>> mx1File=os.path.join(path,os.path.join(testDir,'WDLocalHeatingNetwork\B1\V0\BZ1\M-1-0-1'+dotResolution+'.MX1'))
>>> mx=Mx.Mx(mx1File=mx1File,NoH5Read=True,NoMxsRead=True)
>>> mx.setResultsToMxsFile(NewH5Vec=True)
5
>>> xm.MxSync(mx=mx)
>>> rm=Rm(xm=xm,mx=mx)
>>> # ---
>>> # Plot 3Classes False
>>> # ---
>>> plt.close('all')
>>> ppi=72 # matplotlib default
>>> dpi_screen=2*ppi
>>> fig=plt.figure(dpi=dpi_screen,linewidth=1.)
>>> timeDeltaToT=mx.df.index[2]-mx.df.index[0]
>>> # 3Classes and FixedLimits are False by default; RefPerc is True by default
>>> # MCategory is always assigned according to FixedLimitsHigh/Low ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66,pFWVBGCategory=['BLNZ1u5u7'],pVICsDf=pd.DataFrame({'Kundenname': ['VIC1'],'Knotenname': ['V-K007']}))
>>> # ---
>>> # Check pFWVB Return
>>> # ---
>>> f=lambda x: "{0:8.5f}".format(x)
>>> print(pFWVB[['Measure','MCategory','GCategory','VIC']].round(2).to_string(formatters={'Measure':f}))
Measure MCategory GCategory VIC
0 0.81000 Top BLNZ1u5u7 NaN
1 0.67000 Middle NaN
2 0.66000 Middle BLNZ1u5u7 NaN
3 0.66000 Bottom BLNZ1u5u7 VIC1
4 0.69000 Middle NaN
>>> # ---
>>> # Print
>>> # ---
>>> (wD,fileName)=os.path.split(xm.xmlFile)
>>> (base,ext)=os.path.splitext(fileName)
>>> plotFileName=wD+os.path.sep+base+'.'+'pdf'
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
>>> plt.savefig(plotFileName,dpi=2*dpi_screen)
>>> os.path.exists(plotFileName)
True
>>> # ---
>>> # Plot 3Classes True
>>> # ---
>>> plt.close('all')
>>> # FixedLimits is automatically set to True when 3Classes is True ...
>>> pFWVB=rm.pltNetDHUS(timeDeltaToT=timeDeltaToT,pFWVBMeasure3Classes=True,pFWVBMeasureCBFixedLimitHigh=0.80,pFWVBMeasureCBFixedLimitLow=0.66)
>>> # ---
>>> # LocalHeatingNetwork Clean Up
>>> # ---
>>> if os.path.exists(mx.h5File):
... os.remove(mx.h5File)
>>> if os.path.exists(mx.mxsZipFile):
... os.remove(mx.mxsZipFile)
>>> if os.path.exists(mx.h5FileVecs):
... os.remove(mx.h5FileVecs)
>>> if os.path.exists(plotFileName):
... os.remove(plotFileName)
"""
__version__='172.16.58.3.dev1'
import warnings # 3.6
#...\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
# from ._conv import register_converters as _register_converters
warnings.simplefilter(action='ignore', category=FutureWarning)
#C:\Users\Wolters\Anaconda3\lib\site-packages\matplotlib\cbook\deprecation.py:107: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance.
# warnings.warn(message, mplDeprecation, stacklevel=1)
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
import os
import sys
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import pandas as pd
import h5py
from collections import namedtuple
from operator import attrgetter
import subprocess
import warnings
import tables
import math
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.colorbar import make_axes
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
from matplotlib import markers
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import networkx as nx
from itertools import chain
import math
import sys
from copy import deepcopy
from itertools import chain
import scipy
from scipy.signal import savgol_filter
import logging
# ---
# --- PT3S Imports
# ---
logger = logging.getLogger('PT3S')
if __name__ == "__main__":
logger.debug("{0:s}{1:s}".format('in MODULEFILE: __main__ Context','.'))
else:
logger.debug("{0:s}{1:s}{2:s}{3:s}".format('in MODULEFILE: Not __main__ Context: ','__name__: ',__name__," ."))
try:
from PT3S import Mx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Mx - trying import Mx instead ... maybe pip install -e . is active ...'))
import Mx
try:
from PT3S import Xm
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Xm - trying import Xm instead ... maybe pip install -e . is active ...'))
import Xm
try:
from PT3S import Am
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Am - trying import Am instead ... maybe pip install -e . is active ...'))
import Am
try:
from PT3S import Lx
except ImportError:
logger.debug("{0:s}{1:s}".format('ImportError: ','from PT3S import Lx - trying import Lx instead ... maybe pip install -e . is active ...'))
import Lx
# ---
# --- main Imports
# ---
import argparse
import unittest
import doctest
import math
from itertools import tee
# --- General parameters
# -----------------------
DINA6 = (4.13 , 5.83)
DINA5 = (5.83 , 8.27)
DINA4 = (8.27 , 11.69)
DINA3 = (11.69 , 16.54)
DINA2 = (16.54 , 23.39)
DINA1 = (23.39 , 33.11)
DINA0 = (33.11 , 46.81)
DINA6q = ( 5.83, 4.13)
DINA5q = ( 8.27, 5.83)
DINA4q = ( 11.69, 8.27)
DINA3q = ( 16.54,11.69)
DINA2q = ( 23.39,16.54)
DINA1q = ( 33.11,23.39)
DINA0q = ( 46.81,33.11)
dpiSize=72
DINA4_x=8.2677165354
DINA4_y=11.6929133858
DINA3_x=DINA4_x*math.sqrt(2)
DINA3_y=DINA4_y*math.sqrt(2)
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
ylimpD=(-5,70)
ylimpDmlc=(600,1350) #(300,1050)
ylimQD=(-75,300)
ylim3rdD=(0,3)
yticks3rdD=[0,1,2,3]
yGridStepsD=30
yticksALD=[0,3,4,10,20,30,40]
ylimALD=(yticksALD[0],yticksALD[-1])
yticksRD=[0,2,4,10,15,30,45]
ylimRD=(-yticksRD[-1],yticksRD[-1])
ylimACD=(-5,5)
yticksACD=[-5,0,5]
yticksTVD=[0,100,135,180,200,300]
ylimTVD=(yticksTVD[0],yticksTVD[-1])
plotTVAmLabelD='TIMER u. AM [Sek. u. (N)m3*100]'
def getDerivative(df,col,shiftSize=1,windowSize=60,fct=None,savgol_polyorder=None):
"""
returns a df
df: the df
col: the col of df to be derived
shiftSize: the difference between 2 indices for dValue and dt
windowSize: size for rolling mean or window_length of savgol_filter; the chosen filter technique is applied after fct
windowSize must be an even number
for savgol_filter windowSize-1 is used
fct: function to be applied on dValue/dt
savgol_polyorder: if not None savgol_filter is applied; pandas' rolling.mean() is applied otherwise
new cols:
dt (with shiftSize)
dValue (from col)
dValueDt (from col); fct applied
dValueDtFiltered; the chosen filter technique is applied
"""
mDf=df.dropna().copy(deep=True)
try:
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
if savgol_polyorder == None:
mDf['dValueDtFiltered']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
else:
mDf['dValueDtFiltered']=savgol_filter(mDf['dValueDt'].values,windowSize-1, savgol_polyorder)
            mDf=mDf.iloc[windowSize//2+1+savgol_polyorder-1:]
#mDf=mDf.iloc[windowSize-1:]
except Exception as e:
raise e
finally:
return mDf
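# Illustrative usage sketch for getDerivative (added for clarity; not part of the original code).
# The column name 'QM_AV' and the 60-sample window below are example assumptions, not values
# prescribed by this module; pd/np are the module-level imports.
def _exampleGetDerivative():
    idx=pd.date_range('2021-03-19 01:00:00',periods=600,freq='S')
    df=pd.DataFrame({'QM_AV':np.linspace(0.,100.,600)},index=idx)
    # derivative in units per second, smoothed with a 60-sample rolling mean
    dfDeriv=getDerivative(df,'QM_AV',shiftSize=1,windowSize=60)
    # the same derivative, smoothed with a Savitzky-Golay filter of order 3 instead
    dfDerivSG=getDerivative(df,'QM_AV',shiftSize=1,windowSize=60,savgol_polyorder=3)
    return dfDeriv,dfDerivSG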
def fCVDNodesFromName(x):
Nodes=x.replace('°','~')
Nodes=Nodes.split('~')
Nodes =[Node.lstrip().rstrip() for Node in Nodes if len(Node)>0]
return Nodes
def fgetMaxpMinFromName(CVDName,dfSegsNodesNDataDpkt):
"""
    returns the max. pMin over all NODEs in CVDName
"""
nodeLst=fCVDNodesFromName(CVDName)
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['NODEsName'].isin(nodeLst)][['pMin','pMinMlc']]
s=df.max()
return s.pMin
# --- General Functions
# -----------------------
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def genTimespans(timeStart
,timeEnd
,timeSpan=pd.Timedelta('12 Minutes')
,timeOverlap=pd.Timedelta('0 Seconds')
,timeStartPraefix=pd.Timedelta('0 Seconds')
,timeEndPostfix=pd.Timedelta('0 Seconds')
):
# generates timeSpan-Sections
# if timeStart is
# an int, it is considered as the number of desired Sections before timeEnd; timeEnd must be a time
# a time, it is considered as timeStart
# if timeEnd is
# an int, it is considered as the number of desired Sections after timeStart; timeStart must be a time
# a time, it is considered as timeEnd
# if timeSpan is
# an int, it is considered as the number of desired Sections
# a time, it is considered as timeSpan
# returns an array of tuples
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
xlims=[]
try:
if type(timeStart) == int:
numOfDesiredSections=timeStart
timeStartEff=timeEnd+timeEndPostfix-numOfDesiredSections*timeSpan+(numOfDesiredSections-1)*timeOverlap-timeStartPraefix
else:
timeStartEff=timeStart-timeStartPraefix
logger.debug("{0:s}timeStartEff: {1:s}".format(logStr,str(timeStartEff)))
if type(timeEnd) == int:
numOfDesiredSections=timeEnd
timeEndEff=timeStart-timeStartPraefix+numOfDesiredSections*timeSpan-(numOfDesiredSections-1)*timeOverlap+timeEndPostfix
else:
timeEndEff=timeEnd+timeEndPostfix
logger.debug("{0:s}timeEndEff: {1:s}".format(logStr,str(timeEndEff)))
if type(timeSpan) == int:
numOfDesiredSections=timeSpan
dt=timeEndEff-timeStartEff
timeSpanEff=dt/numOfDesiredSections+(numOfDesiredSections-1)*timeOverlap
else:
timeSpanEff=timeSpan
logger.debug("{0:s}timeSpanEff: {1:s}".format(logStr,str(timeSpanEff)))
logger.debug("{0:s}timeOverlap: {1:s}".format(logStr,str(timeOverlap)))
timeStartAct = timeStartEff
while timeStartAct < timeEndEff:
logger.debug("{0:s}timeStartAct: {1:s}".format(logStr,str(timeStartAct)))
timeEndAct=timeStartAct+timeSpanEff
xlim=(timeStartAct,timeEndAct)
xlims.append(xlim)
timeStartAct = timeEndAct - timeOverlap
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
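# Illustrative usage sketch for genTimespans (added for clarity; not part of the original code).
# The timestamps below are arbitrary example values.
def _exampleGenTimespans():
    tEnd=pd.Timestamp('2021-03-19 02:00:00')
    # 3 sections of 12 minutes (the default timeSpan) each, ending at tEnd
    xlims=genTimespans(timeStart=3,timeEnd=tEnd)
    # explicit start and end, cut into 10-minute sections overlapping by 1 minute
    tStart=pd.Timestamp('2021-03-19 01:00:00')
    xlims2=genTimespans(timeStart=tStart,timeEnd=tEnd
                       ,timeSpan=pd.Timedelta('10 Minutes')
                       ,timeOverlap=pd.Timedelta('1 Minutes'))
    return xlims,xlims2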
def gen2Timespans(
     timeStart # start of a "process"
    ,timeEnd # end of a "process"
    ,timeSpan=pd.Timedelta('12 Minutes')
    ,timeStartPraefix=pd.Timedelta('0 Seconds')
    ,timeEndPostfix=pd.Timedelta('0 Seconds')
    ,roundStr=None # i.e. '5min': timeStart.round(roundStr) and timeEnd likewise
    ):
    """
    generates 2 time ranges of equal length:
    one around timeStart and
    one around timeEnd
    """
#print("timeStartPraefix: {:s}".format(str(timeStartPraefix)))
#print("timeEndPostfix: {:s}".format(str(timeEndPostfix)))
xlims=[]
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if roundStr != None:
timeStart=timeStart.round(roundStr)
timeEnd=timeEnd.round(roundStr)
xlims.append((timeStart-timeStartPraefix,timeStart-timeStartPraefix+timeSpan))
xlims.append((timeEnd+timeEndPostfix-timeSpan,timeEnd+timeEndPostfix))
return xlims
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return xlims
def fTotalTimeFromPairs(
x
,denominator=None # i.e. pd.Timedelta('1 minute') for totalTime in Minutes
,roundToInt=True # round to and return as int if denominator is specified; else td is rounded by 2
):
tdTotal=pd.Timedelta('0 seconds')
for idx,tPairs in enumerate(x):
t1,t2=tPairs
if idx==0:
tLast=t2
else:
if t1 <= tLast:
print("Zeitpaar überlappt?!")
td=t2-t1
if td < pd.Timedelta('1 seconds'):
pass
#print("Zeitpaar < als 1 Sekunde?!")
tdTotal=tdTotal+td
if denominator==None:
return tdTotal
else:
td=tdTotal / denominator
if roundToInt:
td=int(round(td,0))
else:
td=round(td,2)
return td
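# Illustrative usage sketch for fTotalTimeFromPairs (added for clarity; not part of the original code).
def _exampleFTotalTimeFromPairs():
    t=pd.Timestamp('2021-03-19 01:02:00')
    tPairs=[(t,t+pd.Timedelta('90 seconds'))
           ,(t+pd.Timedelta('5 minutes'),t+pd.Timedelta('8 minutes'))]
    # total duration as a Timedelta
    tdTotal=fTotalTimeFromPairs(tPairs)
    # total duration in minutes, rounded to an int
    minutes=fTotalTimeFromPairs(tPairs,denominator=pd.Timedelta('1 minute'))
    return tdTotal,minutes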
def findAllTimeIntervalls(
df
,fct=lambda row: True if row['col'] == 46 else False
,tdAllowed=None
):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
rows,cols=df.shape
if df.empty:
logger.debug("{:s}df ist leer".format(logStr))
elif rows == 1:
logger.debug("{:s}df hat nur 1 Zeile: {:s}".format(logStr,df.to_string()))
rowValue=fct(df.iloc[0])
if rowValue:
tPair=(df.index[0],df.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
# paarweise über alle Zeilen
for (i1, row1), (i2, row2) in pairwise(df.iterrows()):
row1Value=fct(row1)
row2Value=fct(row2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not row1Value and row2Value:
tEin=i2
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif row1Value and not row2Value:
if tEin != None:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
pass # sonst: Bed. ist jetzt Aus und war nicht Ein
# Bed. kann nur im ersten Fall Ein gehen
# wenn 1 x und 2 x
elif row1Value and row2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# letztes Paar
if row1Value and row2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
if tdAllowed != None:
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed)
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
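# Illustrative usage sketch for findAllTimeIntervalls (added for clarity; not part of the original code).
# The column name 'AL_S' and the value 46 are example assumptions.
def _exampleFindAllTimeIntervalls():
    idx=pd.date_range('2021-03-19 01:02:00',periods=6,freq='S')
    df=pd.DataFrame({'AL_S':[0,46,46,0,46,46]},index=idx)
    # all time ranges in which AL_S equals 46
    tPairs=findAllTimeIntervalls(df,fct=lambda row: row['AL_S']==46)
    return tPairs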
def findAllTimeIntervallsSeries(
s=pd.Series()
,fct=lambda x: True if x == 46 else False
,tdAllowed=None # if not None all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=True
):
"""
    # if fct is not None:
    #   find all [time ranges] for which fct is True; these time ranges are returned; only pairs are returned; solitary True values are not lost but are returned as a pair (t,t)
    #   solitary True values are ONLY included if s contains just 1 value and that value is True; the 1 returned pair then contains the solitary timestamp for both times
    #   tdAllowed can be specified
    #   if so, the time ranges are subsequently merged if they are no more than tdAllowed apart; these merged time ranges are then returned
    # if fct is None:
    #   tdAllowed must be specified
    #   s is split into time ranges which are no more than the threshold tdAllowed apart; these time ranges are returned
    #   in general every returned time range has a start and an end (i.e. 2 times), even if the threshold has to be ignored once or several times to achieve that
    #   because no time range contained in s shall be lost
    #   if s contains just 1 value, 1 time pair with the same timestamp for both times is returned, if the value is not Null
    # returns array of Time-Pair-Tuples
>>> import pandas as pd
>>> t=pd.Timestamp('2021-03-19 01:02:00')
>>> t1=t +pd.Timedelta('1 second')
>>> t2=t1+pd.Timedelta('1 second')
>>> t3=t2+pd.Timedelta('1 second')
>>> t4=t3+pd.Timedelta('1 second')
>>> t5=t4+pd.Timedelta('1 second')
>>> t6=t5+pd.Timedelta('1 second')
>>> t7=t6+pd.Timedelta('1 second')
>>> d = {t1: 46, t2: 0} # geht aus - kein Paar
>>> s1PaarGehtAus=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 46} # geht ein - kein Paar
>>> s1PaarGehtEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t5: 46, t6: 0} # geht ausE - kein Paar
>>> s1PaarGehtAusE=pd.Series(data=d, index=[t5, t6])
>>> d = {t5: 0, t6: 46} # geht einE - kein Paar
>>> s1PaarGehtEinE=pd.Series(data=d, index=[t5, t6])
>>> d = {t1: 46, t2: 46} # geht aus - ein Paar
>>> s1PaarEin=pd.Series(data=d, index=[t1, t2])
>>> d = {t1: 0, t2: 0} # geht aus - kein Paar
>>> s1PaarAus=pd.Series(data=d, index=[t1, t2])
>>> s2PaarAus=pd.concat([s1PaarGehtAus,s1PaarGehtAusE])
>>> s2PaarEin=pd.concat([s1PaarGehtEin,s1PaarGehtEinE])
>>> s2PaarAusEin=pd.concat([s1PaarGehtAus,s1PaarGehtEinE])
>>> s2PaarEinAus=pd.concat([s1PaarGehtEin,s1PaarGehtAusE])
>>> # 1 Wert
>>> d = {t1: 46} # 1 Wert - Wahr
>>> s1WertWahr=pd.Series(data=d, index=[t1])
>>> d = {t1: 44} # 1 Wert - Falsch
>>> s1WertFalsch=pd.Series(data=d, index=[t1])
>>> d = {t1: None} # 1 Wert - None
>>> s1WertNone=pd.Series(data=d, index=[t1])
>>> ###
>>> # 46 0
>>> # 0 46
>>> # 0 0
>>> # 46 46 !1 Paar
>>> # 46 0 46 0
>>> # 46 0 0 46
>>> # 0 46 0 46
>>> # 0 46 46 0 !1 Paar
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus)
[]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin)
[]
>>> findAllTimeIntervallsSeries(s1PaarEin)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarAus)
[]
>>> findAllTimeIntervallsSeries(s2PaarEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarAusEin)
[]
>>> findAllTimeIntervallsSeries(s2PaarEinAus)
[(Timestamp('2021-03-19 01:02:02'), Timestamp('2021-03-19 01:02:05'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertFalsch)
[]
>>> ###
>>> # 46 0 !1 Paar
>>> # 0 46 !1 Paar
>>> # 0 0 !1 Paar
>>> # 46 46 !1 Paar
>>> # 46 0 46 0 !2 Paare
>>> # 46 0 0 46 !2 Paare
>>> # 0 46 0 46 !2 Paare
>>> # 0 46 46 0 !2 Paare
>>> ###
>>> findAllTimeIntervallsSeries(s1PaarGehtAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarGehtEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s1PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02'))]
>>> findAllTimeIntervallsSeries(s2PaarAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarAusEin,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> findAllTimeIntervallsSeries(s2PaarEinAus,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:05'), Timestamp('2021-03-19 01:02:06'))]
>>> # 1 Wert
>>> findAllTimeIntervallsSeries(s1WertWahr,fct=None)
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:01'))]
>>> findAllTimeIntervallsSeries(s1WertNone,fct=None)
[]
>>> ###
>>> d = {t1: 0, t3: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t1, t3])
>>> findAllTimeIntervallsSeries(s1PaarmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:03'))]
>>> d = {t4: 0, t5: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t4, t5])
>>> s2PaarmZoZ=pd.concat([s1PaarmZ,s1PaaroZ])
>>> findAllTimeIntervallsSeries(s2PaarmZoZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t1: 0, t2: 0}
>>> s1PaaroZ=pd.Series(data=d, index=[t1, t2])
>>> d = {t3: 0, t5: 0}
>>> s1PaarmZ=pd.Series(data=d, index=[t3, t5])
>>> s2PaaroZmZ=pd.concat([s1PaaroZ,s1PaarmZ])
>>> findAllTimeIntervallsSeries(s2PaaroZmZ,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:05'))]
>>> ###
>>> d = {t6: 0, t7: 0}
>>> s1PaaroZ2=pd.Series(data=d, index=[t6, t7])
>>> d = {t4: 0}
>>> solitaer=pd.Series(data=d, index=[t4])
>>> s5er=pd.concat([s1PaaroZ,solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s5er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:02')), (Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
>>> s3er=pd.concat([s1PaaroZ,solitaer])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:01'), Timestamp('2021-03-19 01:02:04'))]
>>> s3er=pd.concat([solitaer,s1PaaroZ2])
>>> findAllTimeIntervallsSeries(s3er,fct=None,tdAllowed=pd.Timedelta('1 second'))
[(Timestamp('2021-03-19 01:02:04'), Timestamp('2021-03-19 01:02:07'))]
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
tPairs=[]
try:
if s.empty:
logger.debug("{:s}Series {!s:s} ist leer".format(logStr,s.name))
elif s.size == 1:
logger.debug("{:s}Series {!s:s} hat nur 1 Element: {:s}".format(logStr,s.name,s.to_string()))
if fct != None:
# 1 Paar mit selben Zeiten wenn das 1 Element Wahr
sValue=fct(s.iloc[0])
if sValue:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
# 1 Paar mit selben Zeiten wenn das 1 Element nicht None
sValue=s.iloc[0]
if sValue != None:
tPair=(s.index[0],s.index[0])
tPairs.append(tPair)
else:
pass
else:
tEin=None
if fct != None:
# paarweise über alle Zeiten
for idx,((i1, s1), (i2, s2)) in enumerate(pairwise(s.iteritems())):
s1Value=fct(s1)
s2Value=fct(s2)
# wenn 1 nicht x und 2 x tEin=t2 "geht Ein"
if not s1Value and s2Value:
tEin=i2
if idx > 0: # Info
pass
else:
# beim ersten Paar "geht Ein"
pass
# wenn 1 x und 2 nicht x tAus=t2 "geht Aus"
elif s1Value and not s2Value:
if tEin != None:
if tEin<i1:
# Paar speichern
tPair=(tEin,i1)
tPairs.append(tPair)
else:
# singulaeres Ereignis
# Paar mit selben Zeiten
tPair=(tEin,i1)
tPairs.append(tPair)
pass
else: # geht Aus ohne Ein zu sein
if idx > 0: # Info
pass
else:
# im ersten Paar
pass
# wenn 1 x und 2 x
elif s1Value and s2Value:
if tEin != None:
pass
else:
# im ersten Wertepaar ist der Bereich Ein
tEin=i1
# Behandlung letztes Paar
# bleibt Ein am Ende der Series: Paar speichern
if s1Value and s2Value:
if tEin != None:
tPair=(tEin,i2)
tPairs.append(tPair)
# Behandlung tdAllowed
if tdAllowed != None:
if debugOutput:
logger.debug("{:s}Series {!s:s}: Intervalle werden mit {!s:s} zusammengefasst ...".format(logStr,s.name,tdAllowed))
tPairsOld=tPairs.copy()
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed,debugOutput=debugOutput)
if debugOutput:
tPairsZusammengefasst=sorted(list(set(tPairsOld) - set(tPairs)))
if len(tPairsZusammengefasst)>0:
logger.debug("{:s}Series {!s:s}: Intervalle wurden wg. {!s:s} zusammengefasst. Nachfolgend die zusgefassten Intervalle: {!s:s}. Sowie die entsprechenden neuen: {!s:s}".format(
logStr
,s.name
,tdAllowed
,tPairsZusammengefasst
,sorted(list(set(tPairs) - set(tPairsOld)))
))
else:
# paarweise über alle Zeiten
# neues Paar beginnen
anzInPair=1 # Anzahl der Zeiten in aktueller Zeitspanne
for (i1, s1), (i2, s2) in pairwise(s.iteritems()):
td=i2-i1
if td > tdAllowed: # Zeit zwischen 2 Zeiten > als Schwelle: Zeitspanne ist abgeschlossen
if tEin==None:
# erstes Paar liegt bereits > als Schwelle auseinander
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
# aktuelle Zeitspanne beginnt beim 1. Wert und geht über Schwellwert
tEin=i1
anzInPair=2
else:
if anzInPair>=2:
# Zeitspanne abschließen
tPair=(tEin,i1)
tPairs.append(tPair)
# neue Zeitspanne beginnen
tEin=i2
anzInPair=1
else:
# Zeitspannenabschluss wird ignoriert, denn sonst Zeitspanne mit nur 1 Wert
anzInPair=2
else: # Zeitspanne zugelassen, weiter ...
if tEin==None:
tEin=i1
anzInPair=anzInPair+1
# letztes Zeitpaar behandeln
if anzInPair>=2:
tPair=(tEin,i2)
tPairs.append(tPair)
else:
# ein letzter Wert wuerde ueber bleiben, letzte Zeitspanne verlängern ...
tPair=tPairs[-1]
tPair=(tPair[0],i2)
tPairs[-1]=tPair
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
def fCombineSubsequenttPairs(
tPairs
,tdAllowed=pd.Timedelta('1 second') # all subsequent TimePairs with TimeDifference <= tdAllowed are combined to one TimePair
,debugOutput=False
):
# returns tPairs
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
for idx,(tp1,tp2) in enumerate(pairwise(tPairs)):
t1Ende=tp1[1]
t2Start=tp2[0]
if t2Start-t1Ende <= tdAllowed:
if debugOutput:
logger.debug("{:s} t1Ende: {!s:s} t2Start: {!s:s} Gap: {!s:s}".format(logStr,t1Ende,t2Start,t2Start-t1Ende))
tPairs[idx]=(tp1[0],tp2[1]) # Folgepaar in vorheriges Paar integrieren
tPairs.remove(tp2) # Folgepaar löschen
tPairs=fCombineSubsequenttPairs(tPairs,tdAllowed) # Rekursion
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return tPairs
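# Illustrative usage sketch for fCombineSubsequenttPairs (added for clarity; not part of the original code).
def _exampleFCombineSubsequenttPairs():
    t=pd.Timestamp('2021-03-19 01:02:00')
    tPairs=[(t,t+pd.Timedelta('10 seconds'))
           ,(t+pd.Timedelta('11 seconds'),t+pd.Timedelta('20 seconds'))]
    # the 1-second gap is <= tdAllowed, so the two pairs are combined into one
    return fCombineSubsequenttPairs(tPairs,tdAllowed=pd.Timedelta('1 second'))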
class RmError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
AlarmEvent = namedtuple('alarmEvent','tA,tE,ZHKNR,LDSResBaseType')
# --- Parameters and Functions LDS Reports
# ----------------------------------------
def pltMakeCategoricalColors(color,nOfSubColorsReq=3,reversedOrder=False):
"""
Returns an array of rgb colors derived from color.
Parameter:
color: a rgb color
nOfSubColorsReq: number of SubColors requested
Raises:
RmError
>>> import matplotlib
>>> color='red'
>>> c=list(matplotlib.colors.to_rgb(color))
>>> import Rm
>>> Rm.pltMakeCategoricalColors(c)
array([[1. , 0. , 0. ],
[1. , 0.375, 0.375],
[1. , 0.75 , 0.75 ]])
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
rgb=None
try:
chsv = matplotlib.colors.rgb_to_hsv(color[:3])
arhsv = np.tile(chsv,nOfSubColorsReq).reshape(nOfSubColorsReq,3)
arhsv[:,1] = np.linspace(chsv[1],0.25,nOfSubColorsReq)
arhsv[:,2] = np.linspace(chsv[2],1,nOfSubColorsReq)
rgb = matplotlib.colors.hsv_to_rgb(arhsv)
if reversedOrder:
rgb=list(reversed(rgb))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return rgb
# colors for pressures
SrcColorp='green'
SrcColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorp)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorp='blue'
SnkColorsp=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorp)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
# colors for flows
SrcColorQ='red'
SrcColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SrcColorQ)),nOfSubColorsReq=4,reversedOrder=False)
# first color is the original color
SnkColorQ='orange'
SnkColorsQ=pltMakeCategoricalColors(list(matplotlib.colors.to_rgb(SnkColorQ)),nOfSubColorsReq=4,reversedOrder=True)
# last color is the original color
lwBig=4.5
lwSmall=2.5
attrsDct={ 'p Src':{'color':SrcColorp,'lw':lwBig,'where':'post'}
,'p Snk':{'color':SnkColorp,'lw':lwSmall+1.,'where':'post'}
,'p Snk 2':{'color':'mediumorchid','where':'post'}
,'p Snk 3':{'color':'darkviolet','where':'post'}
,'p Snk 4':{'color':'plum','where':'post'}
,'Q Src':{'color':SrcColorQ,'lw':lwBig,'where':'post'}
,'Q Snk':{'color':SnkColorQ,'lw':lwSmall+1.,'where':'post'}
,'Q Snk 2':{'color':'indianred','where':'post'}
,'Q Snk 3':{'color':'coral','where':'post'}
,'Q Snk 4':{'color':'salmon','where':'post'}
,'Q Src RTTM':{'color':SrcColorQ,'lw':matplotlib.rcParams['lines.linewidth']+1.,'ls':'dotted','where':'post'}
,'Q Snk RTTM':{'color':SnkColorQ,'lw':matplotlib.rcParams['lines.linewidth'] ,'ls':'dotted','where':'post'}
,'Q Snk 2 RTTM':{'color':'indianred','ls':'dotted','where':'post'}
,'Q Snk 3 RTTM':{'color':'coral','ls':'dotted','where':'post'}
,'Q Snk 4 RTTM':{'color':'salmon','ls':'dotted','where':'post'}
,'p ISrc 1':{'color':SrcColorsp[-1],'ls':'dashdot','where':'post'}
,'p ISrc 2':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 3':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISrc 4':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 5':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISrc 6':{'color':SrcColorsp[-2],'ls':'dashdot','where':'post'}
,'p ISnk 1':{'color':SnkColorsp[0],'ls':'dashdot','where':'post'}
,'p ISnk 2':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 3':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'} # ab hier selbe Farbe
,'p ISnk 4':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 5':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'p ISnk 6':{'color':SnkColorsp[1],'ls':'dashdot','where':'post'}
,'Q xSrc 1':{'color':SrcColorsQ[-1],'ls':'dashdot','where':'post'}
,'Q xSrc 2':{'color':SrcColorsQ[-2],'ls':'dashdot','where':'post'}
,'Q xSrc 3':{'color':SrcColorsQ[-3],'ls':'dashdot','where':'post'}
,'Q xSnk 1':{'color':SnkColorsQ[0],'ls':'dashdot','where':'post'}
,'Q xSnk 2':{'color':SnkColorsQ[1],'ls':'dashdot','where':'post'}
,'Q xSnk 3':{'color':SnkColorsQ[2],'ls':'dashdot','where':'post'}
,'Q (DE) Me':{'color': 'indigo','ls': 'dashdot','where': 'post','lw':1.5}
,'Q (DE) Re':{'color': 'cyan','ls': 'dashdot','where': 'post','lw':3.5}
,'p (DE) SS Me':{'color': 'magenta','ls': 'dashdot','where': 'post'}
,'p (DE) DS Me':{'color': 'darkviolet','ls': 'dashdot','where': 'post'}
,'p (DE) SS Re':{'color': 'magenta','ls': 'dotted','where': 'post'}
,'p (DE) DS Re':{'color': 'darkviolet','ls': 'dotted','where': 'post'}
,'p OPC LDSErgV':{'color':'olive'
,'lw':lwSmall-.5
,'ms':matplotlib.rcParams['lines.markersize']
,'marker':'x'
,'mec':'olive'
,'mfc':'olive'
,'where':'post'}
,'p OPC Src':{'color':SrcColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorp
,'mfc':SrcColorQ
,'where':'post'}
,'p OPC Snk':{'color':SnkColorp
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorp
,'mfc':SnkColorQ
,'where':'post'}
,'Q OPC Src':{'color':SrcColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SrcColorQ
,'mfc':SrcColorp
,'where':'post'}
,'Q OPC Snk':{'color':SnkColorQ
,'lw':0.05+2
,'ms':matplotlib.rcParams['lines.markersize']/2
,'marker':'D'
,'mec':SnkColorQ
,'mfc':SnkColorp
,'where':'post'}
}
attrsDctLDS={
'Seg_AL_S_Attrs':{'color':'blue','lw':3.,'where':'post'}
,'Druck_AL_S_Attrs':{'color':'blue','lw':3.,'ls':'dashed','where':'post'}
,'Seg_MZ_AV_Attrs':{'color':'orange','zorder':3,'where':'post'}
,'Druck_MZ_AV_Attrs':{'color':'orange','zorder':3,'ls':'dashed','where':'post'}
,'Seg_LR_AV_Attrs':{'color':'green','zorder':1,'where':'post'}
,'Druck_LR_AV_Attrs':{'color':'green','zorder':1,'ls':'dashed','where':'post'}
,'Seg_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'where':'post'}
,'Druck_LP_AV_Attrs':{'color':'turquoise','zorder':0,'lw':1.50,'ls':'dashed','where':'post'}
,'Seg_NG_AV_Attrs':{'color':'red','zorder':2,'where':'post'}
,'Druck_NG_AV_Attrs':{'color':'red','zorder':2,'ls':'dashed','where':'post'}
,'Seg_SB_S_Attrs':{'color':'black','alpha':.5,'where':'post'}
,'Druck_SB_S_Attrs':{'color':'black','ls':'dashed','alpha':.75,'where':'post','lw':1.0}
,'Seg_AC_AV_Attrs':{'color':'indigo','where':'post'}
,'Druck_AC_AV_Attrs':{'color':'indigo','ls':'dashed','where':'post'}
,'Seg_ACF_AV_Attrs':{'color':'blueviolet','where':'post','lw':1.0}
,'Druck_ACF_AV_Attrs':{'color':'blueviolet','ls':'dashed','where':'post','lw':1.0}
,'Seg_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_ACC_Limits_Attrs':{'color':'indigo','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_TIMER_AV_Attrs':{'color':'chartreuse','where':'post'}
,'Druck_TIMER_AV_Attrs':{'color':'chartreuse','ls':'dashed','where':'post'}
,'Seg_AM_AV_Attrs':{'color':'chocolate','where':'post'}
,'Druck_AM_AV_Attrs':{'color':'chocolate','ls':'dashed','where':'post'}
#
,'Seg_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[2][1]} # 'densely dotted'
,'Druck_DPDT_REF_Attrs':{'color':'violet','ls':linestyle_tuple[8][1]} # 'densely dashdotted'
,'Seg_DPDT_AV_Attrs':{'color':'fuchsia','where':'post','lw':2.0}
,'Druck_DPDT_AV_Attrs':{'color':'fuchsia','ls':'dashed','where':'post','lw':2.0}
,'Seg_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[6][1],'where':'post','lw':1.0} # 'loosely dashdotted'
,'Druck_QM16_AV_Attrs':{'color':'sandybrown','ls':linestyle_tuple[10][1],'where':'post','lw':1.0} # 'loosely dashdotdotted'
}
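# Illustrative sketch of how attrsDct / attrsDctLDS are meant to be used (added for clarity;
# not part of the original code): each value is a kwargs dict for a step plot. The axis ax and
# the Series s are assumed to be provided by the caller; 'p Src' is one of the keys defined above.
def _exampleStepPlotWithAttrs(ax,s,key='p Src'):
    attrs=attrsDct[key]
    ax.step(s.index,s.values,label=key,**attrs)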
pSIDEvents=re.compile('(?P<Prae>IMDI\.)?Objects\.(?P<colRegExMiddle>3S_FBG_ESCHIEBER|FBG_ESCHIEBER{1})\.(3S_)?(?P<colRegExSchieberID>[a-z,A-Z,0-9,_]+)\.(?P<colRegExEventID>(In\.ZUST|In\.LAEUFT|In\.LAEUFT_NICHT|In\.STOER|Out\.AUF|Out\.HALT|Out\.ZU)$)')
# evaluated are: colRegExSchieberID (which valve is concerned), colRegExMiddle (command or state) and colRegExEventID (which command resp. which state)
# the commands resp. states (the values of colRegExEventID) must be defined below in order to define the marker (of the command resp. of the state)
eventCCmds={ 'Out.AUF':0
,'Out.ZU':1
,'Out.HALT':2}
eventCStats={'In.LAEUFT':3
,'In.LAEUFT_NICHT':4
,'In.ZUST':5
,'Out.AUF':6
,'Out.ZU':7
,'Out.HALT':8
,'In.STOER':9}
valRegExMiddleCmds='3S_FBG_ESCHIEBER' # colRegExMiddle value for commands (==> eventCCmds)
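# Illustrative sketch of how pSIDEvents is applied to an event column name (added for clarity;
# not part of the original code). The ID below is a synthetic example, not a real datapoint.
def _examplePSIDEvents():
    col='Objects.3S_FBG_ESCHIEBER.3S_XYZ_SCH_01.Out.AUF'
    m=pSIDEvents.search(col)
    if m != None:
        # which valve, command (3S_FBG_ESCHIEBER) or state (FBG_ESCHIEBER), and which command/state
        return m.group('colRegExSchieberID'),m.group('colRegExMiddle'),m.group('colRegExEventID')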
LDSParameter=[
'ACC_SLOWTRANSIENT'
,'ACC_TRANSIENT'
,'DESIGNFLOW'
,'DT'
,'FILTERWINDOW'
#,'L_PERCENT'
,'L_PERCENT_STDY'
,'L_PERCENT_STRAN'
,'L_PERCENT_TRANS'
,'L_SHUTOFF'
,'L_SLOWTRANSIENT'
,'L_SLOWTRANSIENTQP'
,'L_STANDSTILL'
,'L_STANDSTILLQP'
,'L_TRANSIENT'
,'L_TRANSIENTQP'
,'L_TRANSIENTVBIGF'
,'L_TRANSIENTPDNTF'
,'MEAN'
,'NAME'
,'ORDER'
,'TIMER'
,'TTIMERTOALARM'
,'TIMERTOLISS'
,'TIMERTOLIST'
]
LDSParameterDataD={
'ACC_SLOWTRANSIENT':0.1
,'ACC_TRANSIENT':0.8
,'DESIGNFLOW':250.
,'DT':1
,'FILTERWINDOW':180
#,'L_PERCENT':1.6
,'L_PERCENT_STDY':1.6
,'L_PERCENT_STRAN':1.6
,'L_PERCENT_TRANS':1.6
,'L_SHUTOFF':2.
,'L_SLOWTRANSIENT':4.
,'L_SLOWTRANSIENTQP':4.
,'L_STANDSTILL':2.
,'L_STANDSTILLQP':2.
,'L_TRANSIENT':10.
,'L_TRANSIENTQP':10.
,'L_TRANSIENTVBIGF':3.
,'L_TRANSIENTPDNTF':1.5
,'MEAN':1
,'ORDER':1
,'TIMER':180
,'TTIMERTOALARM':45 # TIMER/4
,'TIMERTOLISS':180
,'TIMERTOLIST':180
,'NAME':''
}
def fSEGNameFromPV_2(Beschr):
# fSEGNameFromSWVTBeschr
# 2,3,4,5
if Beschr in ['',None]:
return None
m=re.search(Lx.pID,Beschr)
if m == None:
return Beschr
return m.group('C2')+'_'+m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')
def fSEGNameFromPV_3(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
def fSEGNameFromPV_3m(PV):
# fSEGNameFromPV
# ...
m=re.search(Lx.pID,PV)
#print("C4: {:s} C6: {:s}".format(m.group('C4'),m.group('C6')))
if m.group('C4')=='AAD' and m.group('C6')=='_OHN':
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+'_OHV1'
elif m.group('C4')=='OHN' and m.group('C6')=='_NGD':
return m.group('C3')+'_'+'OHV2'+'_'+m.group('C5')+m.group('C6')
else:
return m.group('C3')+'_'+m.group('C4')+'_'+m.group('C5')+m.group('C6')
# derive a DIVPipelineName from a PV
def fDIVNameFromPV(PV):
m=re.search(Lx.pID,PV)
return m.group('C2')+'-'+m.group('C4')
# derive a DIVPipelineName from a SEGName
def fDIVNameFromSEGName(SEGName):
if pd.isnull(SEGName):
return None
# dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
m=re.search('(\d+)_(\w+)_(\w+)_(\w+)',SEGName)
if m == None:
return SEGName
return m.group(1)+'_'+m.group(3)
#def getNamesFromOPCITEM_ID(dfSegsNodesNDataDpkt
# ,OPCITEM_ID):
# """
# Returns tuple (DIVPipelineName,SEGName) from OPCITEM_ID PH
# """
# df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['OPCITEM_ID']==OPCITEM_ID]
# if not df.empty:
# return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def fGetBaseIDFromResID(
ID='Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.MW.value'
):
"""
Returns 'Objects.3S_XXX_DRUCK.3S_6_BNV_01_PTI_01.In.'
    works in principle for SEG and Druck (pressure) results: every result PV of a vector yields the base valid for all result PVs of that vector
    i.e. the result PVs of a vector differ only in their suffix
    see also fGetSEGBaseIDFromSEGName
"""
if pd.isnull(ID):
return None
m=re.search(Lx.pID,ID)
if m == None:
return None
try:
base=m.group('A')+'.'+m.group('B')\
+'.'+m.group('C1')\
+'_'+m.group('C2')\
+'_'+m.group('C3')\
+'_'+m.group('C4')\
+'_'+m.group('C5')\
+m.group('C6')
#print(m.groups())
#print(m.groupdict())
if 'C7' in m.groupdict().keys():
if m.group('C7') != None:
base=base+m.group('C7')
base=base+'.'+m.group('D')\
+'.'
#print(base)
except:
base=m.group(0)+' (Fehler in fGetBaseIDFromResID)'
return base
def fGetSEGBaseIDFromSEGName(
SEGName='6_AAD_41_OHV1'
):
"""
Returns 'Objects.3S_FBG_SEG_INFO.3S_L_'+SEGName+'.In.'
In some cases SEGName is manipulated ...
    see also fGetBaseIDFromResID
"""
if SEGName == '6_AAD_41_OHV1':
x='6_AAD_41_OHN'
elif SEGName == '6_OHV2_41_NGD':
x='6_OHN_41_NGD'
else:
x=SEGName
return 'Objects.3S_FBG_SEG_INFO.3S_L_'+x+'.In.'
def getNamesFromSEGResIDBase(dfSegsNodesNDataDpkt
,SEGResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName) from SEGResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['SEGResIDBase']==SEGResIDBase]
if not df.empty:
return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0])
def getNamesFromDruckResIDBase(dfSegsNodesNDataDpkt
,DruckResIDBase):
"""
Returns tuple (DIVPipelineName,SEGName,SEGResIDBase,SEGOnlyInLDSPara) from DruckResIDBase
"""
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase']==DruckResIDBase]
if not df.empty:
#return (df['DIVPipelineName'].iloc[0],df['SEGName'].iloc[0],df['SEGResIDBase'].iloc[0])
tupleLst=[]
for index,row in df.iterrows():
tupleItem=(row['DIVPipelineName'],row['SEGName'],row['SEGResIDBase'],row['SEGOnlyInLDSPara'])
tupleLst.append(tupleItem)
return tupleLst
else:
return []
def fGetErgIDsFromBaseID(
baseID='Objects.3S_FBG_SEG_INFO.3S_L_6_BUV_01_BUA.In.'
,dfODI=pd.DataFrame() # df mit ODI Parametrierungsdaten
,strSep=' '
,patternPat='^IMDI.' #
,pattern=True # nur ergIDs, fuer die 2ndPatternPat zutrifft liefern
):
"""
    returns a string
    of IDs from dfODI, separated by strSep, which contain baseID (and, if pattern is True, match patternPat)
    baseID (and group(0) of patternPat if pattern is True) are removed from the IDs
"""
if baseID in [None,'']:
return None
df=dfODI[dfODI.index.str.contains(baseID)]
if df.empty:
return None
if pattern:
ergIDs=''.join([e.replace(baseID,'').replace(re.search(patternPat,e).group(0),'')+' ' for e in df.index if re.search(patternPat,e) != None])
else:
ergIDs=''.join([e.replace(baseID,'')+' ' for e in df.index if re.search(patternPat,e) == None])
return ergIDs
def dfSegsNodesNDataDpkt(
VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
,Model=r"MDBDOC\FBG.mdb" # a Access Model
,am=None # a Access Model already processed
,SEGsDefPattern='(?P<SEG_Ki>\S+)~(?P<SEG_Kk>\S+)$' # RSLW-Beschreibung: liefert die Knotennamen der Segmentdefinition ()
,RIDefPattern='(?P<Prae>\S+)\.(?P<Post>RICHT.S)$' # SWVT-Beschreibung (RICHT-DP): liefert u.a. SEGName
,fSEGNameFromPV_2=fSEGNameFromPV_2 # Funktion, die von SWVT-Beschreibung (RICHT-DP) u.a. SEGName liefert
,fGetBaseIDFromResID=fGetBaseIDFromResID # Funktion, die von OPCITEM-ID des PH-Kanals eines KNOTens den Wortstamm der Knotenergebnisse liefert
,fGetSEGBaseIDFromSEGName=fGetSEGBaseIDFromSEGName # Funktion, die aus SEGName den Wortstamm der Segmentergebnisse liefert
,LDSPara=r"App LDS\Modelle\WDFBG\B1\V0\BZ1\LDS_Para.xml"
,LDSParaPT=r"App LDS\SirOPC\AppLDS_DPDTParams.csv"
,ODI=r"App LDS\SirOPC\AppLDS_ODI.csv"
,LDSParameter=LDSParameter
,LDSParameterDataD=LDSParameterDataD
):
"""
    all segments with path data (edge sequences), with edge and node data as well as parametrization data
returns df:
DIVPipelineName
SEGName
SEGNodes (Ki~Kk; Schluessel in LDSPara)
SEGOnlyInLDSPara
NODEsRef
NODEsRef_max
NODEsSEGLfdNr
NODEsSEGLfdNrType
NODEsName
OBJTYPE
ZKOR
Blockname
ATTRTYPE (PH)
CLIENT_ID
OPCITEM_ID
NAME (der DPKT-Gruppe)
DruckResIDBase
SEGResIDBase
SEGResIDs
SEGResIDsIMDI
DruckResIDs
DruckResIDsIMDI
NODEsSEGDruckErgLfdNr
# LDSPara
ACC_SLOWTRANSIENT
ACC_TRANSIENT
DESIGNFLOW
DT
FILTERWINDOW
L_PERCENT_STDY
L_PERCENT_STRAN
L_PERCENT_TRANS
L_SHUTOFF
L_SLOWTRANSIENT
L_SLOWTRANSIENTQP
L_STANDSTILL
L_STANDSTILLQP
L_TRANSIENT
L_TRANSIENTPDNTF
L_TRANSIENTQP
L_TRANSIENTVBIGF
MEAN
ORDER
TIMER
TIMERTOLISS
TIMERTOLIST
TTIMERTOALARM
# LDSParaPT
#ID
pMin
DT_Vorhaltemass
TTimer_PMin
Faktor_PMin
MaxL_PMin
pMinMlc
pMinMlcMinSEG
pMinMlcMaxSEG
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfSegsNodesNDataDpkt=pd.DataFrame()
try:
###### --- LDSPara
LDSParaFile=os.path.join(VersionsDir,LDSPara)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSPara',LDSPara))
with open(LDSParaFile) as f:
xml = f.read()
xmlWellFormed='<root>'+xml+'</root>'
root=ET.fromstring(xmlWellFormed)
LDSParameterData={}
for key in LDSParameterDataD.keys():
LDSParameterData[key]=[]
logger.debug("{:s}LDSParameter: {!s:s}.".format(logStr,LDSParameter))
for idx,element in enumerate(root.iter(tag='LDSI')):
attribKeysMute=[]
for key,value in element.attrib.items():
if key not in LDSParameter:
logger.warning("{:s}{:s}: Parameter: {:s} undefiniert.".format(logStr,element.attrib['NAME'],key))
attribKeysMute.append(key)
keysIst=element.attrib.keys()
keysSoll=set(LDSParameter)
keysExplizitFehlend=keysSoll-keysIst
LDSIParaDct=element.attrib
for key in keysExplizitFehlend:
if key=='ORDER':
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
elif key=='TTIMERTOALARM':
LDSIParaDct[key]=int(LDSIParaDct['TIMER'])/4
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
else:
LDSIParaDct[key]=LDSParameterDataD[key]
logger.debug("{:s}{:25s}: explizit fehlender Parameter: {:20s} ({!s:s}).".format(logStr,element.attrib['NAME'],key,LDSIParaDct[key]))
keyListToProcess=[key for key in LDSIParaDct.keys() if key not in attribKeysMute]
for key in keyListToProcess:
LDSParameterData[key].append(LDSIParaDct[key])
df=pd.DataFrame.from_dict(LDSParameterData)
df=df.set_index('NAME').sort_index()
df.index.rename('SEGMENT', inplace=True)
df=df[sorted(df.columns.to_list())]
df = df.apply(pd.to_numeric)
#logger.debug("{:s}df: {:s}".format(logStr,df.to_string()))
logger.debug("{:s}Parameter, die nicht auf Standardwerten sind:".format(logStr))
for index, row in df.iterrows():
for colName, colValue in zip(df.columns.to_list(),row):
if colValue != LDSParameterDataD[colName]:
logger.debug("Segment: {:30s}: Parameter: {:20s} Wert: {:10s} (Standard: {:s})".format(index,colName,str(colValue),str(LDSParameterDataD[colName])))
dfPara=df
# --- Einlesen Modell
if am == None:
accFile=os.path.join(VersionsDir,Model)
logger.info("{:s}###### {:10s}: {:s}: Lesen und verarbeiten ...".format(logStr,'Modell',Model))
am=Am.Am(accFile=accFile)
V_BVZ_RSLW=am.dataFrames['V_BVZ_RSLW']
V_BVZ_SWVT=am.dataFrames['V_BVZ_SWVT']
V3_KNOT=am.dataFrames['V3_KNOT']
V3_VBEL=am.dataFrames['V3_VBEL']
V3_DPKT=am.dataFrames['V3_DPKT']
V3_RSLW_SWVT=am.dataFrames['V3_RSLW_SWVT']
# --- Segmente ermitteln
# --- per Modell
SEGsDefinesPerRICHT=V3_RSLW_SWVT[
(V3_RSLW_SWVT['BESCHREIBUNG'].str.match(SEGsDefPattern).isin([True])) # Muster Ki~Kk ...
& #!
(V3_RSLW_SWVT['BESCHREIBUNG_SWVT'].str.match(RIDefPattern).isin([True])) # Muster Förderrichtungs-PV ...
].copy(deep=True)
SEGsDefinesPerRICHT=SEGsDefinesPerRICHT[['BESCHREIBUNG','BESCHREIBUNG_SWVT']]
# --- nur per LDS Para
lSEGOnlyInLDSPara=[str(SEGNodes) for SEGNodes in dfPara.index if str(SEGNodes) not in SEGsDefinesPerRICHT['BESCHREIBUNG'].values]
for SEGNodes in lSEGOnlyInLDSPara:
logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
# --- zusammenfassen
SEGsDefines=pd.concat([SEGsDefinesPerRICHT,pd.DataFrame(lSEGOnlyInLDSPara,columns=['BESCHREIBUNG'])])
# Knotennamen der SEGDef ergänzen
df=SEGsDefines['BESCHREIBUNG'].str.extract(SEGsDefPattern,expand=True)
dfCols=df.columns.to_list()
SEGsDefines=pd.concat([SEGsDefines,df],axis=1)
# ausduennen
SEGsDefines=SEGsDefines[dfCols+['BESCHREIBUNG_SWVT','BESCHREIBUNG']]
# sortieren
SEGsDefines=SEGsDefines.sort_values(by=['BESCHREIBUNG_SWVT','BESCHREIBUNG']).reset_index(drop=True)
# SEGName
SEGsDefines['BESCHREIBUNG_SWVT']=SEGsDefines.apply(lambda row: row['BESCHREIBUNG_SWVT'] if not pd.isnull(row['BESCHREIBUNG_SWVT']) else row['BESCHREIBUNG'] ,axis=1)
#print(SEGsDefines)
SEGsDefines['SEGName']=SEGsDefines['BESCHREIBUNG_SWVT'].apply(lambda x: fSEGNameFromPV_2(x))
# --- Segmentkantenzuege ermitteln
dfSegsNodeLst={} # nur zu Kontrollzwecken
dfSegsNode=[]
for index,row in SEGsDefines[~SEGsDefines[dfCols[-1]].isnull()].iterrows():
df=Xm.Xm.constructShortestPathFromNodeList(df=V3_VBEL.reset_index()
,sourceCol='NAME_i'
,targetCol='NAME_k'
,nl=[row[dfCols[0]],row[dfCols[-1]]]
,weight=None,query=None,fmask=None,filterNonQ0Rows=True)
s=pd.concat([pd.Series([row[dfCols[0]]]),df['nextNODE']])
s.name=row['SEGName']
dfSegsNodeLst[row['SEGName']]=s.reset_index(drop=True)
df2=pd.DataFrame(s.reset_index(drop=True)).rename(columns={s.name:'NODEs'})
df2['SEGName']=s.name
df2=df2[['SEGName','NODEs']]
sObj=pd.concat([pd.Series(['None']),df['OBJTYPE']])
sObj.name='OBJTYPE'
df3=pd.concat([df2,pd.DataFrame(sObj.reset_index(drop=True))],axis=1)
df4=df3.reset_index().rename(columns={'index':'NODEsLfdNr','NODEs':'NODEsName'})[['SEGName','NODEsLfdNr','NODEsName','OBJTYPE']]
df4['NODEsType']=df4.apply(lambda row: row['NODEsLfdNr'] if row['NODEsLfdNr'] < df4.index[-1] else -1, axis=1)
df4=df4[['SEGName','NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
df4['SEGNodes']=row[dfCols[0]]+'~'+row[dfCols[-1]]
dfSegsNode.append(df4)
dfSegsNodes=pd.concat(dfSegsNode).reset_index(drop=True)
# ---
dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes['NODEsRef']=dfSegsNodes.sort_values(
by=['NODEsName','SEGOnlyInLDSPara','NODEsType','SEGName']
,ascending=[True,True,False,True]).groupby(['NODEsName']).cumcount() + 1
dfSegsNodes=pd.merge(dfSegsNodes,dfSegsNodes.groupby(['NODEsName']).max(),left_on='NODEsName',right_index=True,suffixes=('','_max'))
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsLfdNr','NODEsType','NODEsName','OBJTYPE']]
dfSegsNodes=dfSegsNodes.rename(columns={'NODEsLfdNr':'NODEsSEGLfdNr','NODEsType':'NODEsSEGLfdNrType'})
### # ---
### dfSegsNodes['SEGOnlyInLDSPara']=dfSegsNodes.apply(lambda row: True if row['SEGNodes'] in lSEGOnlyInLDSPara else False,axis=1)
dfSegsNodes=dfSegsNodes[['SEGName','SEGNodes','SEGOnlyInLDSPara'
,'NODEsRef'
,'NODEsRef_max'
,'NODEsSEGLfdNr','NODEsSEGLfdNrType','NODEsName','OBJTYPE']]
# --- Knotendaten ergaenzen
dfSegsNodesNData=pd.merge(dfSegsNodes,V3_KNOT, left_on='NODEsName',right_on='NAME',suffixes=('','KNOT'))
dfSegsNodesNData=dfSegsNodesNData.filter(items=dfSegsNodes.columns.to_list()+['ZKOR','NAME_CONT','NAME_VKNO','pk'])
dfSegsNodesNData=dfSegsNodesNData.rename(columns={'NAME_CONT':'Blockname','NAME_VKNO':'Bl.Kn. fuer Block'})
# --- Knotendatenpunktdaten ergänzen
V3_DPKT_KNOT=pd.merge(V3_DPKT,V3_KNOT,left_on='fkOBJTYPE',right_on='pk',suffixes=('','_KNOT'))
V3_DPKT_KNOT_PH=V3_DPKT_KNOT[V3_DPKT_KNOT['ATTRTYPE'].isin(['PH'])]
# Mehrfacheintraege sollte es nicht geben ...
# V3_DPKT_KNOT_PH[V3_DPKT_KNOT_PH.duplicated(subset=['fkOBJTYPE'])]
df=pd.merge(dfSegsNodesNData,V3_DPKT_KNOT_PH,left_on='pk',right_on='fkOBJTYPE',suffixes=('','_DPKT'),how='left')
cols=dfSegsNodesNData.columns.to_list()
cols.remove('pk')
df=df.filter(items=cols+['ATTRTYPE','CLIENT_ID','OPCITEM_ID','NAME'])
dfSegsNodesNDataDpkt=df
#dfSegsNodesNDataDpkt
# ---
colList=dfSegsNodesNDataDpkt.columns.to_list()
dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fDIVNameFromSEGName(x))
### dfSegsNodesNDataDpkt['DIVPipelineName']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(1)+'_'+re.search('(\d+)_(\w+)_(\w+)_(\w+)',x).group(3) )
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.filter(items=['DIVPipelineName']+colList)
dfSegsNodesNDataDpkt=dfSegsNodesNDataDpkt.sort_values(by=['DIVPipelineName','SEGName','NODEsSEGLfdNr']).reset_index(drop=True)
dfSegsNodesNDataDpkt['DruckResIDBase']=dfSegsNodesNDataDpkt['OPCITEM_ID'].apply(lambda x: fGetBaseIDFromResID(x) )
dfSegsNodesNDataDpkt['SEGResIDBase']=dfSegsNodesNDataDpkt['SEGName'].apply(lambda x: fGetSEGBaseIDFromSEGName(x) )
###### --- ODI
ODIFile=os.path.join(VersionsDir,ODI)
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'ODI',ODI))
dfODI=Lx.getDfFromODI(ODIFile)
dfSegsNodesNDataDpkt['SEGResIDs']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['SEGResIDsIMDI']=dfSegsNodesNDataDpkt['SEGResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
dfSegsNodesNDataDpkt['DruckResIDs']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=False))
dfSegsNodesNDataDpkt['DruckResIDsIMDI']=dfSegsNodesNDataDpkt['DruckResIDBase'].apply(lambda x: fGetErgIDsFromBaseID(baseID=x,dfODI=dfODI,pattern=True))
# --- lfd. Nr. der Druckmessstelle im Segment ermitteln
df=dfSegsNodesNDataDpkt[dfSegsNodesNDataDpkt['DruckResIDBase'].notnull()].copy()
df['NODEsSEGDruckErgLfdNr']=df.groupby('SEGName').cumcount() + 1
df['NODEsSEGDruckErgLfdNr']=df['NODEsSEGDruckErgLfdNr'].astype(int)
cols=dfSegsNodesNDataDpkt.columns.to_list()
cols.append('NODEsSEGDruckErgLfdNr')
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt
,df
,left_index=True
,right_index=True
,how='left'
,suffixes=('','_df')
).filter(items=cols)
dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr']=dfSegsNodesNDataDpkt['NODEsSEGDruckErgLfdNr'].astype(int,errors='ignore')
# LDSPara ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfPara,left_on='SEGNodes',right_index=True,suffixes=('','_LDSPara'),how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
#for SEGNodes in [str(SEGNodes) for SEGNodes in df.index if str(SEGNodes) not in dfSegsNodesNDataDpkt['SEGNodes'].values]:
# logger.warning("{:s}LDSPara SEG {:s} ist nicht Modell-definiert!".format(logStr,SEGNodes))
###### --- LDSParaPT
LDSParaPTFile=os.path.join(VersionsDir,LDSParaPT)
if os.path.exists(LDSParaPTFile):
logger.info("{:s}###### {:10s}: {:s}: Lesen und prüfen ...".format(logStr,'LDSParaPT',LDSParaPT))
dfDPDTParams=pd.read_csv(LDSParaPTFile,delimiter=';',error_bad_lines=False,warn_bad_lines=True)
dfMehrfach=dfDPDTParams.groupby(by='#ID').filter(lambda x: len(x) > 1)
rows,cols=dfMehrfach.shape
if rows > 0:
logger.warning("{:s}Mehrfachkonfigurationen:".format(logStr))
logger.warning("{:s}".format(dfMehrfach.to_string()))
dfDPDTParams=dfDPDTParams.groupby(by='#ID').first()
# LDSParaPT ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,dfDPDTParams,left_on='CLIENT_ID',right_on='#ID',how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
            dfOhne=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID']].reset_index(drop=True)
rows,cols=dfOhne.shape
if rows > 0:
logger.debug("{:s}Druckmessstellen ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(dfOhne.to_string()))
dfSegsNodesNDataDpkt['pMinMlc']=dfSegsNodesNDataDpkt.apply(lambda row: row['ZKOR']+row['pMin']*100000/(794.*9.81),axis=1)
g=dfSegsNodesNDataDpkt.groupby(by='SEGName')
df=g.pMinMlc.agg(pMinMlcMinSEG=np.min,pMinMlcMaxSEG=np.max)
# pMinMlcMinSEG, pMinMlcMaxSEG ergaenzen
logger.debug("{:s}dfSegsNodesNDataDpkt: shape vorher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
dfSegsNodesNDataDpkt=pd.merge(dfSegsNodesNDataDpkt,df,left_on='SEGName',right_index=True,how='left')
logger.debug("{:s}dfSegsNodesNDataDpkt: shape nachher: {!s:s}".format(logStr,dfSegsNodesNDataDpkt.shape))
# Segmente ohne Mindestdruecke ausgeben
df=dfSegsNodesNDataDpkt.groupby(['SEGName']).first()
df=df[pd.isnull(df['pMinMlcMinSEG'])][['DIVPipelineName','SEGNodes']]
rows,cols=df.shape
if rows > 0:
logger.debug("{:s}ganze Segmente ohne Mindestdruck:".format(logStr))
logger.debug("{:s}".format(df.to_string()))
# Mindestdruecke ausgeben
            df=dfSegsNodesNDataDpkt[(~pd.isnull(dfSegsNodesNDataDpkt['CLIENT_ID'])) & (dfSegsNodesNDataDpkt['CLIENT_ID'].str.len()>0) & (~pd.isnull(dfSegsNodesNDataDpkt['pMin'])) ][['DIVPipelineName','SEGName','NODEsName','ZKOR','CLIENT_ID','pMin']].reset_index(drop=True)
logger.debug("{:s}dfSegsNodesNDataDpkt: Mindestdrücke: {!s:s}".format(logStr,df.to_string()))
except RmError:
raise
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfSegsNodesNDataDpkt
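# Illustrative usage sketch for dfSegsNodesNDataDpkt (added for clarity; not part of the original
# code). The directory and file names are the function defaults and have to be adapted to an
# existing model version.
def _exampleDfSegsNodesNDataDpkt():
    df=dfSegsNodesNDataDpkt(
         VersionsDir=r"C:\3s\Projekte\Projekt\04 - Versionen\Version82.3"
        ,Model=r"MDBDOC\FBG.mdb"
        )
    # one row per (segment, node); pressure measuring points carry a DruckResIDBase
    dfDruck=df[df['DruckResIDBase'].notnull()]
    return df,dfDruck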
def fResValidSeriesSTAT_S(x): # STAT_S
if pd.isnull(x)==False:
if x >=0:
return True
else:
return False
else:
return False
def fResValidSeriesSTAT_S601(x): # STAT_S
if pd.isnull(x)==False:
if x==601:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S(x,value=20): # AL_S
if pd.isnull(x)==False:
if x==value:
return True
else:
return False
else:
return False
def fResValidSeriesAL_S10(x):
return fResValidSeriesAL_S(x,value=10)
def fResValidSeriesAL_S4(x):
return fResValidSeriesAL_S(x,value=4)
def fResValidSeriesAL_S3(x):
return fResValidSeriesAL_S(x,value=3)
ResChannelFunctions=[fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601]
ResChannelResultNames=['Zustaendig','Alarm','Stoerung']
ResChannelTypes=['STAT_S','AL_S','STAT_S']
# (almost) all available result channels
ResChannelTypesAll=['AL_S','STAT_S','SB_S','MZ_AV','LR_AV','NG_AV','LP_AV','AC_AV','ACCST_AV','ACCTR_AV','ACF_AV','TIMER_AV','AM_AV','DNTD_AV','DNTP_AV','DPDT_AV'
,'DPDT_REF_AV'
,'DPDT_REF' # Workaround
,'QM_AV','ZHKNR_S']
baseColorsSchieber=[ # valve (Schieber) colors
'g' # 1
,'b' # 2
,'m' # 3
,'r' # 4
,'c' # 5
        # all base colors except y (yellow)
,'tab:blue' # 6
,'tab:orange' # 7
,'tab:green' # 8
,'tab:red' # 9
,'tab:purple' # 10
,'tab:brown' # 11
,'tab:pink' # 12
,'gold' # 13
,'fuchsia' # 14
,'coral' # 15
]
markerDefSchieber=[ # valve (Schieber) symbols
'^' # 0 Auf
,'v' # 1 Zu
,'>' # 2 Halt
        # states from here on
,'4' # 3 Laeuft
,'3' # 4 Laeuft nicht
,'P' # 5 Zust
,'1' # 6 Auf
,'2' # 7 Zu
,'+' # 8 Halt
,'x' # 9 Stoer
]
# --- Reports LDS: Functions and Helper Functions
# -----------------------------------------------
def getLDSResVecDf(
ResIDBase='ID.' # i.e. for Segs Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In. / i.e. for Drks Objects.3S_XYZ_DRUCK.3S_6_EL1_39_PTI_02_E.In.
,LDSResBaseType='SEG' # or Druck
,lx=None
,timeStart=None,timeEnd=None
,ResChannelTypes=ResChannelTypesAll
,timeShiftPair=None
):
"""
returns a df: the specified LDSResChannels (AL_S, ...) for an ResIDBase
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfResVec=pd.DataFrame()
try:
# zu lesende IDs basierend auf ResIDBase bestimmen
ErgIDs=[ResIDBase+ext for ext in ResChannelTypes]
IMDIErgIDs=['IMDI.'+ID for ID in ErgIDs]
ErgIDsAll=[*ErgIDs,*IMDIErgIDs]
# Daten lesen von TC-H5s
dfFiltered=lx.getTCsFromH5s(timeStart=timeStart,timeEnd=timeEnd,LDSResOnly=True,LDSResColsSpecified=ErgIDsAll,LDSResTypeSpecified=LDSResBaseType,timeShiftPair=timeShiftPair)
# Spalten umbenennen
colDct={}
for col in dfFiltered.columns:
m=re.search(Lx.pID,col)
colDct[col]=m.group('E')
dfResVec=dfFiltered.rename(columns=colDct)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise RmError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfResVec
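# Illustrative usage sketch for getLDSResVecDf (added for clarity; not part of the original code).
# lx is assumed to be an Lx reader object providing getTCsFromH5s; the ResIDBase is the example
# base named in the docstring above.
def _exampleGetLDSResVecDf(lx):
    dfResVec=getLDSResVecDf(
         ResIDBase='Objects.3S_XYZ_SEG_INFO.3S_L_6_EL1_39_TUD.In.'
        ,LDSResBaseType='SEG'
        ,lx=lx
        ,timeStart=pd.Timestamp('2021-03-19 01:00:00')
        ,timeEnd=pd.Timestamp('2021-03-19 02:00:00')
        )
    # columns are the channel suffixes (e.g. 'AL_S', 'STAT_S') found in the H5 data
    return dfResVec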
def fGetResTimes(
     ResIDBases=[] # list of the base IDs (word stems) of the result vectors
    ,df=pd.DataFrame() # TCsLDSRes...
    ,ResChannelTypes=ResChannelTypes # ['STAT_S','AL_S','STAT_S'] # list of the result vector postfixes
    ,ResChannelFunctions=ResChannelFunctions # [fResValidSeriesSTAT_S,fResValidSeriesAL_S,fResValidSeriesSTAT_S601] # list of the result vector functions
    ,ResChannelResultNames=ResChannelResultNames # ['Zustaendig','Alarm','Stoerung'] # list of the result key names
    ,tdAllowed=pd.Timedelta('1 second') # allowed gap between "goes" and "comes" (the two time ranges adjacent to this gap are counted as 1 time range)
):
"""
Return: dct
key: ResIDBase
value: dct:
key: ResChannelResultName
            Value: list of time pairs (or an empty list)
"""
resTimesDct={}
for ResIDBase in ResIDBases:
tPairsDct={}
for idx,ext in enumerate(ResChannelTypes):
ID=ResIDBase+ext
if ext == 'AL_S':
debugOutput=True
else:
debugOutput=False
if ID in df:
#print("{:s} in Ergliste".format(ID))
tPairs=findAllTimeIntervallsSeries(
s=df[ID].dropna() #!
,fct=ResChannelFunctions[idx]
,tdAllowed=tdAllowed#pd.Timedelta('1 second')
,debugOutput=debugOutput
)
else:
#print("{:s} nicht in Ergliste".format(ID))
tPairs=[]
tPairsDct[ResChannelResultNames[idx]]=tPairs
resTimesDct[ResIDBase]=tPairsDct
return resTimesDct
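# Illustrative usage sketch for fGetResTimes (added for clarity; not part of the original code).
# dfTCsLDSRes is assumed to be a DataFrame of result time curves whose columns contain the
# ResIDBase+channel IDs; dfSegsNodesNDataDpkt is the result of the function of the same name.
def _exampleFGetResTimes(dfTCsLDSRes,dfSegsNodesNDataDpkt):
    ResIDBases=dfSegsNodesNDataDpkt['SEGResIDBase'].dropna().unique().tolist()
    resTimesDct=fGetResTimes(ResIDBases=ResIDBases,df=dfTCsLDSRes)
    # alarm time ranges of the first segment result base
    tPairsAlarm=resTimesDct[ResIDBases[0]]['Alarm']
    return tPairsAlarm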
def getAlarmStatistikData(
h5File='a.h5'
,dfSegsNodesNDataDpkt=pd.DataFrame()
    ,timeShiftPair=None # e.g. (1,'H') for replay
):
"""
Returns TCsLDSRes1,TCsLDSRes2,dfCVDataOnly
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
TCsLDSRes1=pd.DataFrame()
TCsLDSRes2= | pd.DataFrame() | pandas.DataFrame |
import rasterio
import geopandas as gpd
import numpy
import os
import sys
from osgeo import ogr
from osgeo import gdal
from osgeo import osr
import pandas as pd
from rasterio.windows import Window
from rasterio.windows import from_bounds
from rasterio.mask import mask
import tqdm.notebook as tqdm
import numpy.ma as ma
import numpy as np
def Exposure(input_zone, input_value_raster,Ear_Table_PK,agg_col=None):
vector=ogr.Open(input_zone)
lyr =vector.GetLayer()
feat = lyr.GetNextFeature()
geom = feat.GetGeometryRef()
geometrytype=geom.GetGeometryName()
if (geometrytype== 'POLYGON' or geometrytype== 'MULTIPOLYGON'):
return zonalPoly(input_zone,input_value_raster,Ear_Table_PK,agg_col=agg_col)
    elif(geometrytype=='POINT' or geometrytype=='MULTIPOINT'):
        # zonalLine expects an already opened OGR layer, not the file path
        return zonalLine(lyr,input_value_raster,Ear_Table_PK,agg_col=agg_col)
elif(geometrytype=='LINESTRING' or geometrytype=='MULTILINESTRING'):
return zonalPoint(lyr,input_value_raster,Ear_Table_PK,agg_col=agg_col)
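# Illustrative usage sketch for Exposure (added for clarity; not part of the original script).
# The file and column names below are assumptions and have to be replaced by real data.
def _exampleExposure():
    df=Exposure(input_zone='elements_at_risk.shp'
               ,input_value_raster='hazard_classes.tif'
               ,Ear_Table_PK='OBJECTID'
               ,agg_col='ADM2_NAME')
    # per element: percentage exposed to each hazard class
    return df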
def zonalPoly(input_zone_data,input_value_raster,Ear_Table_PK,agg_col):
raster=rasterio.open(input_value_raster)
data=gpd.read_file(input_zone_data)
df=pd.DataFrame()
for ind,row in tqdm.tqdm(data.iterrows(),total=data.shape[0]):
maska,transform=rasterio.mask.mask(raster, [row.geometry], crop=True,nodata=0)
zoneraster = ma.masked_array(maska, mask=maska==0)
len_ras=zoneraster.count()
#print(len_ras)
if len_ras==0:
continue
unique, counts = np.unique(zoneraster, return_counts=True)
if ma.is_masked(unique):
unique=unique.filled(0)
idx=np.where(unique==0)[0][0]
#print(idx)
ids=np.delete(unique, idx)
cus=np.delete(counts, idx)
else:
ids=unique
cus=counts
frequencies = np.asarray((ids, cus)).T
for i in range(len(frequencies)):
frequencies[i][1]=(frequencies[i][1]/len_ras)*100
#print(frequencies)
df_temp= pd.DataFrame(frequencies, columns=['class','exposed'])
df_temp['geom_id']=row[Ear_Table_PK]
if agg_col != None :
df_temp['admin_unit']=row[agg_col]
df_temp['areaOrLen']=row.geometry.area
df=df.append(df_temp,ignore_index=True)
raster=None
return df
def zonalLine(lyr,input_value_raster,Ear_Table_PK,agg_col):
tempDict={}
featlist=range(lyr.GetFeatureCount())
raster = gdal.Open(input_value_raster)
projras = osr.SpatialReference(wkt=raster.GetProjection())
epsgras=projras.GetAttrValue('AUTHORITY',1)
projear=lyr.GetSpatialRef()
epsgear=projear.GetAttrValue('AUTHORITY',1)
#print(epsgear,epsgras)
if not epsgras==epsgear:
toEPSG="EPSG:"+str(epsgear)
output_raster=input_value_raster.replace(".tif","_projected.tif")
gdal.Warp(output_raster,input_value_raster,dstSRS=toEPSG)
raster=None
raster=gdal.Open(output_raster)
else:
pass
# Get raster georeference info
raster_srs = osr.SpatialReference()
raster_srs.ImportFromWkt(raster.GetProjectionRef())
gt=raster.GetGeoTransform()
xOrigin = gt[0]
yOrigin = gt[3]
pixelWidth = gt[1]
pixelHeight = gt[5]
rb=raster.GetRasterBand(1)
df = | pd.DataFrame() | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB,GaussianNB,BernoulliNB,ComplementNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score,confusion_matrix
from sklearn.multioutput import MultiOutputClassifier
from sklearn.pipeline import Pipeline
from sklearn.neighbors import NearestCentroid
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
import pickle
from collections import defaultdict
#categories = ['0','אזרחות, תושבות וכניסה לישראל','בחירות','ביטחון','ביטחון הפנים','בינוי ושיכון','בנקאות וכספים', 'בריאות','8', 'דתות','הגנת הסביבה','חוץ' , 'חוקי הסדרים', 'חוק-יסוד' ,'חינוך','חקלאות','כנסת','מדע', 'מועדים',
# 'מיסוי','מסחר ותעשייה', 'מעמד אישי', 'מקצועות הבריאות', 'מקרקעין' , 'משפט אזרחי' , 'משפט מינהלי' , 'משפט פלילי' , 'ניהול נכסים', 'ספורט', 'ספנות', 'ערכות שיפוטיות' , 'פיתוח והשקעות' , 'פנסיה, ביטוח ושוק ההון',
# 'צרכנות','קליטת עלייה','ראיות וסדרי דין','ראשי המדינה','רווחה','רשויות מקומיות','שירות הציבור' ,'תאגידים' , 'תחבורה ובטיחות בדרכים' , 'תיירות', 'תכנון ובנייה','תעופה','תעסוקה','תקציב', '47', 'תקשורת','תרבות',
# 'תשתיות','מילווה למדינה','טכנולוגיה וסייבר' ]
categories = range(0,52)
print('getting files')
file_names = os.listdir('./Israel_Law_post_parse/')
file_names = ['./Israel_Law_post_parse/' + name for name in file_names]
print('reading csv')
df = pd.read_csv('./file.csv')
print('filtering ids not in csv')
df = df[~df.id.isin(file_names)]
df_toxic = df.drop(['id'], axis=1)
print(df_toxic)
counts=[]
#number of laws per category
categories = list(df_toxic.columns.values)
for i in categories:
counts.append((i,df_toxic[i].sum()))
df_stats= | pd.DataFrame(counts,columns=['category','number_of_laws']) | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
tm.assert_series_equal(empty, empty2)
@pytest.mark.parametrize("input_arg", [np.nan, float("nan")])
def test_constructor_nan(self, input_arg):
empty = Series(dtype="float64", index=range(10))
empty2 = Series(input_arg, index=range(10))
tm.assert_series_equal(empty, empty2, check_index_type=False)
@pytest.mark.parametrize(
"dtype",
["f8", "i8", "M8[ns]", "m8[ns]", "category", "object", "datetime64[ns, UTC]"],
)
@pytest.mark.parametrize("index", [None, pd.Index([])])
def test_constructor_dtype_only(self, dtype, index):
# GH-20865
result = pd.Series(dtype=dtype, index=index)
assert result.dtype == dtype
assert len(result) == 0
def test_constructor_no_data_index_order(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
result = pd.Series(index=["b", "a", "c"])
assert result.index.tolist() == ["b", "a", "c"]
def test_constructor_no_data_string_type(self):
# GH 22477
result = pd.Series(index=[1], dtype=str)
assert np.isnan(result.iloc[0])
@pytest.mark.parametrize("item", ["entry", "ѐ", 13])
def test_constructor_string_element_string_type(self, item):
# GH 22477
result = pd.Series(item, index=[1], dtype=str)
assert result.iloc[0] == str(item)
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
ser = Series(["x", None], dtype=string_dtype)
result = ser.isna()
expected = Series([False, True])
tm.assert_series_equal(result, expected)
assert ser.iloc[1] is None
ser = Series(["x", np.nan], dtype=string_dtype)
assert np.isnan(ser.iloc[1])
def test_constructor_series(self):
index1 = ["d", "b", "a", "c"]
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
tm.assert_series_equal(s2, s1.sort_index())
def test_constructor_iterable(self):
# GH 21987
class Iter:
def __iter__(self):
for i in range(10):
yield i
expected = Series(list(range(10)), dtype="int64")
result = Series(Iter(), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_sequence(self):
# GH 21987
expected = Series(list(range(10)), dtype="int64")
result = Series(range(10), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_single_str(self):
# GH 21987
expected = Series(["abc"])
result = Series("abc")
tm.assert_series_equal(result, expected)
def test_constructor_list_like(self):
# make sure that we are coercing different
# list-likes to standard dtypes and not
# platform specific
expected = Series([1, 2, 3], dtype="int64")
for obj in [[1, 2, 3], (1, 2, 3), np.array([1, 2, 3], dtype="int64")]:
result = Series(obj, index=[0, 1, 2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["bool", "int32", "int64", "float64"])
def test_constructor_index_dtype(self, dtype):
# GH 17088
s = Series(Index([0, 2, 4]), dtype=dtype)
assert s.dtype == dtype
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements from a list are converted to strings
# when dtype is str, 'str', or 'U'
result = Series(input_vals, dtype=string_dtype)
expected = Series(input_vals).astype(string_dtype)
tm.assert_series_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = Series([1.0, 2.0, np.nan], dtype=string_dtype)
expected = Series(["1.0", "2.0", np.nan], dtype=object)
tm.assert_series_equal(result, expected)
assert np.isnan(result[2])
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(range(10))
tm.assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=range(10, 20))
exp.index = range(10, 20)
tm.assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ["a", "b", "c"], fastpath=True)
res = Series(cat)
tm.assert_categorical_equal(res.values, cat)
# can cast to a new dtype
result = Series(pd.Categorical([1, 2, 3]), dtype="int64")
expected = pd.Series([1, 2, 3], dtype="int64")
tm.assert_series_equal(result, expected)
# GH12574
cat = Series(pd.Categorical([1, 2, 3]), dtype="category")
assert is_categorical_dtype(cat)
assert is_categorical_dtype(cat.dtype)
s = Series([1, 2, 3], dtype="category")
assert is_categorical_dtype(s)
assert is_categorical_dtype(s.dtype)
def test_constructor_categorical_with_coercion(self):
factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])
# test basic creation / coercion of categoricals
s = Series(factor, name="A")
assert s.dtype == "category"
assert len(s) == len(factor)
str(s.values)
str(s)
# in a frame
df = DataFrame({"A": factor})
result = df["A"]
tm.assert_series_equal(result, s)
result = df.iloc[:, 0]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
df = DataFrame({"A": s})
result = df["A"]
tm.assert_series_equal(result, s)
assert len(df) == len(factor)
str(df.values)
str(df)
# multiples
df = DataFrame({"A": s, "B": s, "C": 1})
result1 = df["A"]
result2 = df["B"]
tm.assert_series_equal(result1, s)
tm.assert_series_equal(result2, s, check_names=False)
assert result2.name == "B"
assert len(df) == len(factor)
str(df.values)
str(df)
# GH8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name) # doing this breaks transform
expected = x.iloc[0].person_name
result = x.person_name.iloc[0]
assert result == expected
result = x.person_name[0]
assert result == expected
result = x.person_name.loc[0]
assert result == expected
def test_constructor_categorical_dtype(self):
result = pd.Series(
["a", "b"], dtype=CategoricalDtype(["a", "b", "c"], ordered=True)
)
assert is_categorical_dtype(result.dtype) is True
tm.assert_index_equal(result.cat.categories, pd.Index(["a", "b", "c"]))
assert result.cat.ordered
result = pd.Series(["a", "b"], dtype=CategoricalDtype(["b", "a"]))
assert is_categorical_dtype(result.dtype)
tm.assert_index_equal(result.cat.categories, pd.Index(["b", "a"]))
assert result.cat.ordered is False
# GH 19565 - Check broadcasting of scalar with Categorical dtype
result = Series(
"a", index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
expected = Series(
["a", "a"], index=[0, 1], dtype=CategoricalDtype(["a", "b"], ordered=True)
)
tm.assert_series_equal(result, expected)
def test_constructor_categorical_string(self):
# GH 26336: the string 'category' maintains existing CategoricalDtype
cdt = CategoricalDtype(categories=list("dabc"), ordered=True)
expected = Series(list("abcabc"), dtype=cdt)
# Series(Categorical, dtype='category') keeps existing dtype
cat = Categorical(list("abcabc"), dtype=cdt)
result = Series(cat, dtype="category")
tm.assert_series_equal(result, expected)
# Series(Series[Categorical], dtype='category') keeps existing dtype
result = Series(result, dtype="category")
tm.assert_series_equal(result, expected)
def test_categorical_sideeffects_free(self):
# Passing a categorical to a Series and then changing values in either
# the series or the categorical should not change the values in the
# other one, IF you specify copy!
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat, copy=True)
assert s.cat is not cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# setting
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_cat)
# however, copy is False by default
# so this WILL change values
cat = Categorical(["a", "b", "c", "a"])
s = Series(cat)
assert s.values is cat
s.cat.categories = [1, 2, 3]
exp_s = np.array([1, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s)
tm.assert_numpy_array_equal(cat.__array__(), exp_s)
s[0] = 2
exp_s2 = np.array([2, 2, 3, 1], dtype=np.int64)
tm.assert_numpy_array_equal(s.__array__(), exp_s2)
tm.assert_numpy_array_equal(cat.__array__(), exp_s2)
def test_unordered_compare_equal(self):
left = pd.Series(["a", "b", "c"], dtype=CategoricalDtype(["a", "b"]))
right = pd.Series(pd.Categorical(["a", "b", np.nan], categories=["a", "b"]))
tm.assert_series_equal(left, right)
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0.0, np.nan, 2.0], index=index)
tm.assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=float)
tm.assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([0, np.nan, 2], index=index, dtype=float)
tm.assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([np.nan, np.nan, np.nan], dtype=object)
tm.assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series([True, np.nan, False], index=index, dtype=object)
tm.assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
tm.assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype="M8[ns]")
result = Series(data)
expected = Series([iNaT, iNaT, iNaT], dtype="M8[ns]")
tm.assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ["a", "b", "c"]
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), iNaT, datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series(
[datetime(2001, 1, 1), datetime(2001, 1, 2), datetime(2001, 1, 3)],
index=index,
dtype="M8[ns]",
)
tm.assert_series_equal(result, expected)
def test_constructor_maskedarray_hardened(self):
# Check numpy masked arrays with hard masks -- from GH24574
data = ma.masked_all((3,), dtype=float).harden_mask()
result = pd.Series(data)
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(result, expected)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range("20090415", "20090519", freq="B")
data = {k: 1 for k in rng}
result = Series(data, index=rng)
assert result.index is rng
def test_constructor_default_index(self):
s = Series([0, 1, 2])
tm.assert_index_equal(s.index, pd.Index(np.arange(3)))
@pytest.mark.parametrize(
"input",
[
[1, 2, 3],
(1, 2, 3),
list(range(3)),
pd.Categorical(["a", "b", "a"]),
(i for i in range(3)),
map(lambda x: x, range(3)),
],
)
def test_constructor_index_mismatch(self, input):
# GH 19342
# test that construction of a Series with an index of different length
# raises an error
msg = "Length of passed values is 3, index implies 4"
with pytest.raises(ValueError, match=msg):
Series(input, index=np.arange(4))
def test_constructor_numpy_scalar(self):
# GH 19342
# construction with a numpy scalar
# should not raise
result = Series(np.array(100), index=np.arange(4), dtype="int64")
expected = Series(100, index=np.arange(4), dtype="int64")
tm.assert_series_equal(result, expected)
def test_constructor_broadcast_list(self):
# GH 19342
# construction with single-element container and index
# should raise
msg = "Length of passed values is 1, index implies 3"
with pytest.raises(ValueError, match=msg):
Series(["foo"], index=["a", "b", "c"])
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
assert isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1.0, 1.0, 8.0]), dtype="i8")
assert s.dtype == np.dtype("i8")
s = Series(np.array([1.0, 1.0, np.nan]), copy=True, dtype="i8")
assert s.dtype == np.dtype("f8")
def test_constructor_copy(self):
# GH15125
# test dtype parameter has no side effects on copy=True
for data in [[1.0], np.array([1.0])]:
x = Series(data)
y = pd.Series(x, copy=True, dtype=float)
# copy=True maintains original data in Series
tm.assert_series_equal(x, y)
# changes to origin of copy does not affect the copy
x[0] = 2.0
assert not x.equals(y)
assert x[0] == 2.0
assert y[0] == 1.0
@pytest.mark.parametrize(
"index",
[
pd.date_range("20170101", periods=3, tz="US/Eastern"),
pd.date_range("20170101", periods=3),
pd.timedelta_range("1 day", periods=3),
pd.period_range("2012Q1", periods=3, freq="Q"),
pd.Index(list("abc")),
pd.Int64Index([1, 2, 3]),
pd.RangeIndex(0, 3),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_limit_copies(self, index):
# GH 17449
# limit copies of input
s = pd.Series(index)
# we make 1 copy; this is just a smoke test here
assert s._mgr.blocks[0].values is not index
def test_constructor_pass_none(self):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(None, index=range(5))
assert s.dtype == np.float64
s = Series(None, index=range(5), dtype=object)
assert s.dtype == np.object_
# GH 7431
# inference on the index
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
tm.assert_series_equal(s, expected)
def test_constructor_pass_nan_nat(self):
# GH 13467
exp = Series([np.nan, np.nan], dtype=np.float64)
assert exp.dtype == np.float64
tm.assert_series_equal(Series([np.nan, np.nan]), exp)
tm.assert_series_equal(Series(np.array([np.nan, np.nan])), exp)
exp = Series([pd.NaT, pd.NaT])
assert exp.dtype == "datetime64[ns]"
tm.assert_series_equal(Series([pd.NaT, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, pd.NaT])), exp)
tm.assert_series_equal(Series([pd.NaT, np.nan]), exp)
tm.assert_series_equal(Series(np.array([pd.NaT, np.nan])), exp)
tm.assert_series_equal(Series([np.nan, pd.NaT]), exp)
tm.assert_series_equal(Series(np.array([np.nan, pd.NaT])), exp)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Series(["a", "b", "c"], dtype=float)
def test_constructor_unsigned_dtype_overflow(self, uint_dtype):
# see gh-15832
msg = "Trying to coerce negative values to unsigned integers"
with pytest.raises(OverflowError, match=msg):
Series([-1], dtype=uint_dtype)
def test_constructor_coerce_float_fail(self, any_int_dtype):
# see gh-15832
msg = "Trying to coerce float values to integers"
with pytest.raises(ValueError, match=msg):
Series([1, 2, 3.5], dtype=any_int_dtype)
def test_constructor_coerce_float_valid(self, float_dtype):
s = Series([1, 2, 3.5], dtype=float_dtype)
expected = Series([1, 2, 3.5]).astype(float_dtype)
tm.assert_series_equal(s, expected)
def test_constructor_dtype_no_cast(self):
# see gh-1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
assert s[1] == 5
def test_constructor_datelike_coercion(self):
# GH 9477
        # incorrectly inferring datetimelike-looking data when object dtype is
        # specified
s = Series([Timestamp("20130101"), "NOV"], dtype=object)
assert s.iloc[0] == Timestamp("20130101")
assert s.iloc[1] == "NOV"
assert s.dtype == object
        # the dtype was being reset on the slicing and re-inferred to datetime
        # even though the blocks are mixed
belly = "216 3T19".split()
wing1 = "2T15 4H19".split()
wing2 = "416 4T20".split()
mat = pd.to_datetime("2016-01-22 2019-09-07".split())
df = pd.DataFrame({"wing1": wing1, "wing2": wing2, "mat": mat}, index=belly)
result = df.loc["3T19"]
assert result.dtype == object
result = df.loc["216"]
assert result.dtype == object
def test_constructor_datetimes_with_nulls(self):
# gh-15869
for arr in [
np.array([None, None, None, None, datetime.now(), None]),
np.array([None, None, datetime.now(), None]),
]:
result = Series(arr)
assert result.dtype == "M8[ns]"
def test_constructor_dtype_datetime64(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
assert isna(s).all()
        # in theory this should be all nulls, but since
        # we are not specifying a dtype, it is ambiguous
s = Series(iNaT, index=range(5))
assert not isna(s).all()
s = Series(np.nan, dtype="M8[ns]", index=range(5))
assert isna(s).all()
s = Series([datetime(2001, 1, 2, 0, 0), iNaT], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
s = Series([datetime(2001, 1, 2, 0, 0), np.nan], dtype="M8[ns]")
assert isna(s[1])
assert s.dtype == "M8[ns]"
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
assert s.dtype == "M8[ns]"
s.iloc[0] = np.nan
assert s.dtype == "M8[ns]"
# GH3414 related
expected = Series(
[datetime(2013, 1, 1), datetime(2013, 1, 2), datetime(2013, 1, 3)],
dtype="datetime64[ns]",
)
result = Series(Series(dates).astype(np.int64) / 1000000, dtype="M8[ms]")
tm.assert_series_equal(result, expected)
result = Series(dates, dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
expected = Series(
[pd.NaT, datetime(2013, 1, 2), datetime(2013, 1, 3)], dtype="datetime64[ns]"
)
result = Series([np.nan] + dates[1:], dtype="datetime64[ns]")
tm.assert_series_equal(result, expected)
dts = Series(dates, dtype="datetime64[ns]")
# valid astype
dts.astype("int64")
# invalid casting
msg = r"cannot astype a datetimelike from \[datetime64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
dts.astype("int32")
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(dts, dtype=np.int64)
expected = Series(dts.astype(np.int64))
tm.assert_series_equal(result, expected)
        # invalid dates can be held as object
result = Series([datetime(2, 1, 1)])
assert result[0] == datetime(2, 1, 1, 0, 0)
result = Series([datetime(3000, 1, 1)])
assert result[0] == datetime(3000, 1, 1, 0, 0)
# don't mix types
result = Series([Timestamp("20130101"), 1], index=["a", "b"])
assert result["a"] == Timestamp("20130101")
assert result["b"] == 1
# GH6529
# coerce datetime64 non-ns properly
dates = date_range("01-Jan-2015", "01-Dec-2015", freq="M")
values2 = dates.view(np.ndarray).astype("datetime64[ns]")
expected = Series(values2, index=dates)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, dates)
tm.assert_series_equal(result, expected)
# GH 13876
# coerce to non-ns to object properly
expected = Series(values2, index=dates, dtype=object)
for dtype in ["s", "D", "ms", "us", "ns"]:
values1 = dates.view(np.ndarray).astype(f"M8[{dtype}]")
result = Series(values1, index=dates, dtype=object)
tm.assert_series_equal(result, expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()], dtype=object)
series1 = Series(dates2, dates)
tm.assert_numpy_array_equal(series1.values, dates2)
assert series1.dtype == object
# these will correctly infer a datetime
s = Series([None, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([np.nan, pd.NaT, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, None, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
s = Series([pd.NaT, np.nan, "2013-08-05 15:30:00.000001"])
assert s.dtype == "datetime64[ns]"
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range("20130101", periods=3)
assert Series(dr).iloc[0].tz is None
dr = date_range("20130101", periods=3, tz="UTC")
assert str(Series(dr).iloc[0].tz) == "UTC"
dr = date_range("20130101", periods=3, tz="US/Eastern")
assert str(Series(dr).iloc[0].tz) == "US/Eastern"
# non-convertible
s = Series([1479596223000, -1479590, pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a NaT it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
assert s.dtype == "object"
assert s[2] is pd.NaT
assert "NaT" in str(s)
# if we passed a nan it remains
s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
assert s.dtype == "object"
assert s[2] is np.nan
assert "NaN" in str(s)
def test_constructor_with_datetime_tz(self):
# 8260
# support datetime64 with tz
dr = date_range("20130101", periods=3, tz="US/Eastern")
s = Series(dr)
assert s.dtype.name == "datetime64[ns, US/Eastern]"
assert s.dtype == "datetime64[ns, US/Eastern]"
assert is_datetime64tz_dtype(s.dtype)
assert "datetime64[ns, US/Eastern]" in str(s)
# export
result = s.values
assert isinstance(result, np.ndarray)
assert result.dtype == "datetime64[ns]"
exp = pd.DatetimeIndex(result)
exp = exp.tz_localize("UTC").tz_convert(tz=s.dt.tz)
tm.assert_index_equal(dr, exp)
# indexing
result = s.iloc[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[0]
assert result == Timestamp(
"2013-01-01 00:00:00-0500", tz="US/Eastern", freq="D"
)
result = s[Series([True, True, False], index=s.index)]
tm.assert_series_equal(result, s[0:2])
result = s.iloc[0:1]
tm.assert_series_equal(result, Series(dr[0:1]))
# concat
result = pd.concat([s.iloc[0:1], s.iloc[1:]])
tm.assert_series_equal(result, s)
# short str
assert "datetime64[ns, US/Eastern]" in str(s)
# formatting with NaT
result = s.shift()
assert "datetime64[ns, US/Eastern]" in str(result)
assert "NaT" in str(result)
# long str
t = Series(date_range("20130101", periods=1000, tz="US/Eastern"))
assert "datetime64[ns, US/Eastern]" in str(t)
result = pd.DatetimeIndex(s, freq="infer")
tm.assert_index_equal(result, dr)
# inference
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
]
)
assert s.dtype == "datetime64[ns, US/Pacific]"
assert lib.infer_dtype(s, skipna=True) == "datetime64"
s = Series(
[
pd.Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
pd.Timestamp("2013-01-02 14:00:00-0800", tz="US/Eastern"),
]
)
assert s.dtype == "object"
assert lib.infer_dtype(s, skipna=True) == "datetime"
# with all NaT
s = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
expected = Series(pd.DatetimeIndex(["NaT", "NaT"], tz="US/Eastern"))
tm.assert_series_equal(s, expected)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_construction_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units
# gh-19223
dtype = f"{dtype}[{unit}]"
arr = np.array([1, 2, 3], dtype=arr_dtype)
s = Series(arr)
result = s.astype(dtype)
expected = Series(arr.astype(dtype))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["2013-01-01 00:00:00", pd.NaT, np.nan, None])
def test_constructor_with_naive_string_and_datetimetz_dtype(self, arg):
# GH 17415: With naive string
result = Series([arg], dtype="datetime64[ns, CET]")
expected = Series(pd.Timestamp(arg)).dt.tz_localize("CET")
tm.assert_series_equal(result, expected)
def test_constructor_datetime64_bigendian(self):
# GH#30976
ms = np.datetime64(1, "ms")
arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")
result = Series(arr)
expected = Series([Timestamp(ms)])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("interval_constructor", [IntervalIndex, IntervalArray])
def test_construction_interval(self, interval_constructor):
# construction from interval & array of intervals
intervals = interval_constructor.from_breaks(np.arange(3), closed="right")
result = Series(intervals)
assert result.dtype == "interval[int64]"
tm.assert_index_equal(Index(result.values), Index(intervals))
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_interval(self, data_constructor):
# GH 23563: consistent closed results in interval dtype
data = [pd.Interval(0, 1), pd.Interval(0, 2), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(IntervalArray(data))
assert result.dtype == "interval[float64]"
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_interval_mixed_closed(self, data_constructor):
# GH 23563: mixed closed results in object dtype (not interval dtype)
data = [pd.Interval(0, 1, closed="both"), pd.Interval(0, 2, closed="neither")]
result = Series(data_constructor(data))
assert result.dtype == object
assert result.tolist() == data
def test_construction_consistency(self):
# make sure that we are not re-localizing upon construction
# GH 14928
s = Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
result = Series(s, dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.dt.tz_convert("UTC"), dtype=s.dtype)
tm.assert_series_equal(result, s)
result = Series(s.values, dtype=s.dtype)
tm.assert_series_equal(result, s)
@pytest.mark.parametrize(
"data_constructor", [list, np.array], ids=["list", "ndarray[object]"]
)
def test_constructor_infer_period(self, data_constructor):
data = [pd.Period("2000", "D"), pd.Period("2001", "D"), None]
result = pd.Series(data_constructor(data))
expected = pd.Series(period_array(data))
tm.assert_series_equal(result, expected)
assert result.dtype == "Period[D]"
def test_constructor_period_incompatible_frequency(self):
data = [pd.Period("2000", "D"), pd.Period("2001", "A")]
result = pd.Series(data)
assert result.dtype == object
assert result.tolist() == data
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range("20130101", periods=5, freq="D")
s = Series(pi)
assert s.dtype == "Period[D]"
expected = Series(pi.astype(object))
tm.assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {"a": 0.0, "b": 1.0, "c": 2.0}
result = Series(d, index=["b", "c", "d", "a"])
expected = Series([1, 2, np.nan, 0], index=["b", "c", "d", "a"])
tm.assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx, dtype=np.float64)
expected.iloc[0] = 0
expected.iloc[1] = 1
tm.assert_series_equal(result, expected)
def test_constructor_dict_list_value_explicit_dtype(self):
# GH 18625
d = {"a": [[2], [3], [4]]}
result = Series(d, index=["a"], dtype="object")
expected = Series(d, index=["a"])
tm.assert_series_equal(result, expected)
def test_constructor_dict_order(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6, else
# order by value
d = {"b": 1, "a": 0, "c": 2}
result = Series(d)
expected = Series([1, 0, 2], index=list("bac"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_constructor_dict_extension(self, data, dtype):
d = {"a": data}
result = Series(d, index=["a"])
expected = Series(data, index=["a"], dtype=dtype)
assert result.dtype == dtype
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float("nan")])
def test_constructor_dict_nan_key(self, value):
# GH 18480
d = {1: "a", value: "b", float("nan"): "c", 4: "d"}
result = Series(d).sort_values()
expected = Series(["a", "b", "c", "d"], index=[1, value, np.nan, 4])
tm.assert_series_equal(result, expected)
# MultiIndex:
d = {(1, 1): "a", (2, np.nan): "b", (3, value): "c"}
result = Series(d).sort_values()
expected = Series(
["a", "b", "c"], index=Index([(1, 1), (2, np.nan), (3, value)])
)
tm.assert_series_equal(result, expected)
def test_constructor_dict_datetime64_index(self):
# GH 9456
dates_as_str = ["1984-02-19", "1988-11-06", "1989-12-03", "1990-03-15"]
values = [42544017.198965244, 1234565, 40512335.181958228, -1]
def create_data(constructor):
return dict(zip((constructor(x) for x in dates_as_str), values))
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, "%Y-%m-%d"))
data_Timestamp = create_data(Timestamp)
expected = Series(values, (Timestamp(x) for x in dates_as_str))
result_datetime64 = Series(data_datetime64)
result_datetime = Series(data_datetime)
result_Timestamp = Series(data_Timestamp)
tm.assert_series_equal(result_datetime64, expected)
tm.assert_series_equal(result_datetime, expected)
tm.assert_series_equal(result_Timestamp, expected)
def test_constructor_dict_tuple_indexer(self):
# GH 12948
data = {(1, 1, None): -1.0}
result = Series(data)
expected = Series(
-1.0, index=MultiIndex(levels=[[1], [1], [np.nan]], codes=[[0], [0], [-1]])
)
tm.assert_series_equal(result, expected)
def test_constructor_mapping(self, non_dict_mapping_subclass):
# GH 29788
ndm = non_dict_mapping_subclass({3: "three"})
result = Series(ndm)
expected = Series(["three"], index=[3])
tm.assert_series_equal(result, expected)
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
assert list(s) == data
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
assert tuple(s) == data
def test_constructor_dict_of_tuples(self):
data = {(1, 2): 3, (None, 5): 6}
result = Series(data).sort_values()
expected = Series([3, 6], index=MultiIndex.from_tuples([(1, 2), (None, 5)]))
tm.assert_series_equal(result, expected)
def test_constructor_set(self):
values = {1, 2, 3, 4, 5}
with pytest.raises(TypeError, match="'set' type is unordered"):
Series(values)
values = frozenset(values)
with pytest.raises(TypeError, match="'frozenset' type is unordered"):
Series(values)
# https://github.com/pandas-dev/pandas/issues/22698
@pytest.mark.filterwarnings("ignore:elementwise comparison:FutureWarning")
def test_fromDict(self):
data = {"a": 0, "b": 1, "c": 2, "d": 3}
series = Series(data)
tm.assert_is_sorted(series.index)
data = {"a": 0, "b": "1", "c": "2", "d": datetime.now()}
series = Series(data)
assert series.dtype == np.object_
data = {"a": 0, "b": "1", "c": "2", "d": "3"}
series = Series(data)
assert series.dtype == np.object_
data = {"a": "0", "b": "1"}
series = Series(data, dtype=float)
assert series.dtype == np.float64
def test_fromValue(self, datetime_series):
nans = Series(np.NaN, index=datetime_series.index, dtype=np.float64)
assert nans.dtype == np.float_
assert len(nans) == len(datetime_series)
strings = Series("foo", index=datetime_series.index)
assert strings.dtype == np.object_
assert len(strings) == len(datetime_series)
d = datetime.now()
dates = Series(d, index=datetime_series.index)
assert dates.dtype == "M8[ns]"
assert len(dates) == len(datetime_series)
# GH12336
# Test construction of categorical series from value
categorical = Series(0, index=datetime_series.index, dtype="category")
expected = Series(0, index=datetime_series.index).astype("category")
assert categorical.dtype == "category"
assert len(categorical) == len(datetime_series)
tm.assert_series_equal(categorical, expected)
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1)])
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), timedelta(days=2), np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# mixed with NaT
td = Series([timedelta(days=1), NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([timedelta(days=1), np.nan], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(300000000), pd.NaT], dtype="m8[ns]")
assert td.dtype == "timedelta64[ns]"
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), NaT])
assert td.dtype == "timedelta64[ns]"
# because iNaT is int, not coerced to timedelta
td = Series([np.timedelta64(300000000), iNaT])
assert td.dtype == "object"
td = Series([np.timedelta64(300000000), np.nan])
assert td.dtype == "timedelta64[ns]"
td = Series([pd.NaT, np.timedelta64(300000000)])
assert td.dtype == "timedelta64[ns]"
td = Series([np.timedelta64(1, "s")])
assert td.dtype == "timedelta64[ns]"
# these are frequency conversion astypes
# for t in ['s', 'D', 'us', 'ms']:
# with pytest.raises(TypeError):
# td.astype('m8[%s]' % t)
# valid astype
td.astype("int64")
# invalid casting
msg = r"cannot astype a timedelta from \[timedelta64\[ns\]\] to \[int32\]"
with pytest.raises(TypeError, match=msg):
td.astype("int32")
# this is an invalid casting
msg = "Could not convert object to NumPy timedelta"
with pytest.raises(ValueError, match=msg):
Series([timedelta(days=1), "foo"], dtype="m8[ns]")
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ["foo"])
assert td.dtype == "object"
# these will correctly infer a timedelta
s = Series([None, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([np.nan, pd.NaT, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, None, "1 Day"])
assert s.dtype == "timedelta64[ns]"
s = Series([pd.NaT, np.nan, "1 Day"])
assert s.dtype == "timedelta64[ns]"
# GH 16406
def test_constructor_mixed_tz(self):
s = Series([Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")])
expected = Series(
[Timestamp("20130101"), Timestamp("20130101", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(s, expected)
def test_NaT_scalar(self):
series = Series([0, 1000, 2000, iNaT], dtype="M8[ns]")
val = series[3]
assert isna(val)
series[2] = val
assert isna(series[2])
def test_NaT_cast(self):
# GH10747
result = Series([np.nan]).astype("M8[ns]")
expected = Series([NaT])
tm.assert_series_equal(result, expected)
def test_constructor_name_hashable(self):
for n in [777, 777.0, "name", datetime(2001, 11, 11), (1,), "\u05D0"]:
for data in [[1, 2, 3], np.ones(3), {"a": 0, "b": 1}]:
s = Series(data, name=n)
assert s.name == n
def test_constructor_name_unhashable(self):
msg = r"Series\.name must be a hashable type"
for n in [["name_list"], np.ones(2), {1: 2}]:
for data in [["name_list"], np.ones(2), {1: 2}]:
with pytest.raises(TypeError, match=msg):
Series(data, name=n)
def test_auto_conversion(self):
series = Series(list(date_range("1/1/2000", periods=10)))
assert series.dtype == "M8[ns]"
def test_convert_non_ns(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype="timedelta64[s]")
s = Series(arr)
expected = Series(pd.timedelta_range("00:00:01", periods=3, freq="s"))
tm.assert_series_equal(s, expected)
# convert from a numpy array of non-ns datetime64
# note that creating a numpy datetime64 is in LOCAL time!!!!
# seems to work for M8[D], but not for M8[s]
# TODO: is the above comment still accurate/needed?
arr = np.array(
["2013-01-01", "2013-01-02", "2013-01-03"], dtype="datetime64[D]"
)
ser = Series(arr)
expected = Series(date_range("20130101", periods=3, freq="D"))
tm.assert_series_equal(ser, expected)
arr = np.array(
["2013-01-01 00:00:01", "2013-01-01 00:00:02", "2013-01-01 00:00:03"],
dtype="datetime64[s]",
)
ser = Series(arr)
expected = Series(date_range("20130101 00:00:01", periods=3, freq="s"))
tm.assert_series_equal(ser, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cant_cast_datetimelike(self, index):
# floats are not ok
# strip Index to convert PeriodIndex -> Period
# We don't care whether the error message says
# PeriodIndex or PeriodArray
msg = f"Cannot cast {type(index).__name__.rstrip('Index')}.*? to "
with pytest.raises(TypeError, match=msg):
Series(index, dtype=float)
# ints are ok
# we test with np.int64 to get similar results on
# windows / 32-bit platforms
result = Series(index, dtype=np.int64)
expected = Series(index.astype(np.int64))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
date_range("1/1/2000", periods=10),
timedelta_range("1 day", periods=10),
period_range("2000-Q1", periods=10, freq="Q"),
],
ids=lambda x: type(x).__name__,
)
def test_constructor_cast_object(self, index):
s = Series(index, dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(pd.Index(index, dtype=object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
s = Series(index.astype(object), dtype=object)
exp = Series(index).astype(object)
tm.assert_series_equal(s, exp)
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_constructor_generic_timestamp_no_frequency(self, dtype, request):
# see gh-15524, gh-15987
msg = "dtype has no unit. Please pass in"
if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
request.node.add_marker(mark)
with pytest.raises(ValueError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize(
"dtype,msg",
[
("m8[ps]", "cannot convert timedeltalike"),
("M8[ps]", "cannot convert datetimelike"),
],
)
def test_constructor_generic_timestamp_bad_frequency(self, dtype, msg):
# see gh-15524, gh-15987
with pytest.raises(TypeError, match=msg):
Series([], dtype=dtype)
@pytest.mark.parametrize("dtype", [None, "uint8", "category"])
def test_constructor_range_dtype(self, dtype):
# GH 16804
expected = Series([0, 1, 2, 3, 4], dtype=dtype or "int64")
result = Series(range(5), dtype=dtype)
tm.assert_series_equal(result, expected)
def test_constructor_tz_mixed_data(self):
# GH 13051
dt_list = [
Timestamp("2016-05-01 02:03:37"),
Timestamp("2016-04-30 19:03:37-0700", tz="US/Pacific"),
]
result = Series(dt_list)
expected = Series(dt_list, dtype=object)
tm.assert_series_equal(result, expected)
def test_constructor_data_aware_dtype_naive(self, tz_aware_fixture):
# GH#25843
tz = tz_aware_fixture
result = Series([Timestamp("2019", tz=tz)], dtype="datetime64[ns]")
expected = Series([Timestamp("2019")])
tm.assert_series_equal(result, expected)
def test_constructor_datetime64(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
series = Series(dates)
assert np.issubdtype(series.dtype, np.dtype("M8[ns]"))
def test_constructor_datetimelike_scalar_to_string_dtype(self):
# https://github.com/pandas-dev/pandas/pull/33846
result = Series("M", index=[1, 2, 3], dtype="string")
expected = | pd.Series(["M", "M", "M"], index=[1, 2, 3], dtype="string") | pandas.Series |
from surf.script_tab import keytab
from surf.surf_tool import regex2pairs
import os, json, time, re, codecs, glob, shutil
import matplotlib.pyplot as plt
import matplotlib as mpl
import logging.handlers
import pandas as pd
import itertools
import numpy as np
import random
import tensorflow as tf
from sklearn.model_selection import KFold
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
# Class for splitting time series data into CV folds
class PurgedGroupTimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
group_gap : int, default=None
Gap between train and test
max_test_group_size : int, default=Inf
        Maximum group size for a single test set.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate((train_array,
train_array_tmp)),
axis=None), axis=None)
train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate((test_array,
test_array_tmp)),
axis=None), axis=None)
test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in test_array]
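# Usage sketch (illustrative, not part of the original module): each "day" below is
# one group; demo_X/demo_y/demo_groups are toy data, not data from this project.
def _demo_purged_group_split():
    n_days, rows_per_day = 60, 4
    demo_groups = np.repeat(np.arange(n_days), rows_per_day)
    demo_X = np.random.randn(n_days * rows_per_day, 3)
    demo_y = np.random.randn(n_days * rows_per_day)
    cv = PurgedGroupTimeSeriesSplit(n_splits=5, group_gap=2, max_train_group_size=30)
    for fold, (train_idx, test_idx) in enumerate(cv.split(demo_X, demo_y, groups=demo_groups)):
        print(f"fold {fold}: train={len(train_idx)} test={len(test_idx)}")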
# Class for splitting time series data into CV folds (stacking variant with train/val/test)
class PurgedGroupTimeSeriesSplitStacking(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
stacking_mode : bool, default=True
Whether to provide an additional set to test a stacking classifier or not.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
max_val_group_size : int, default=Inf
Maximum group size for a single validation set.
max_test_group_size : int, default=Inf
        Maximum group size for a single test set; if stacking_mode = True and None,
        it defaults to max_val_group_size.
val_group_gap : int, default=None
Gap between train and validation
test_group_gap : int, default=None
Gap between validation and test, if stacking_mode = True and None
it defaults to val_group_gap.
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
stacking_mode=True,
max_train_group_size=np.inf,
max_val_group_size=np.inf,
max_test_group_size=np.inf,
val_group_gap=None,
test_group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.max_val_group_size = max_val_group_size
self.max_test_group_size = max_test_group_size
self.val_group_gap = val_group_gap
self.test_group_gap = test_group_gap
self.verbose = verbose
self.stacking_mode = stacking_mode
def split(self, X, y=None, groups=None):
if self.stacking_mode:
return self.split_ensemble(X, y, groups)
else:
return self.split_standard(X, y, groups)
def split_standard(self, X, y=None, groups=None):
"""Generate indices to split data into training and validation set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/validation set.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validation set indices for that split.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_splits = self.n_splits
group_gap = self.val_group_gap
max_val_group_size = self.max_val_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds, n_groups))
group_val_size = min(n_groups // n_folds, max_val_group_size)
group_val_starts = range(n_groups - n_splits * group_val_size, n_groups, group_val_size)
for group_val_start in group_val_starts:
train_array = []
val_array = []
group_st = max(0, group_val_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_val_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(np.concatenate((train_array, train_array_tmp)), axis=None), axis=None)
train_end = train_array.size
for val_group_idx in unique_groups[group_val_start: group_val_start + group_val_size]:
val_array_tmp = group_dict[val_group_idx]
val_array = np.sort(np.unique(np.concatenate((val_array, val_array_tmp)), axis=None), axis=None)
val_array = val_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in val_array]
def split_ensemble(self, X, y=None, groups=None):
"""Generate indices to split data into training, validation and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
val : ndarray
The validation set indices for that split (testing indices for base classifiers).
test : ndarray
The testing set indices for that split (testing indices for final classifier)
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_splits = self.n_splits
val_group_gap = self.val_group_gap
test_group_gap = self.test_group_gap
if test_group_gap is None:
test_group_gap = val_group_gap
max_train_group_size = self.max_train_group_size
max_val_group_size = self.max_val_group_size
max_test_group_size = self.max_test_group_size
if max_test_group_size is None:
max_test_group_size = max_val_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds, n_groups))
group_val_size = min(n_groups // n_folds, max_val_group_size)
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size, n_groups, group_test_size)
train_indices = []
val_indices = []
test_indices = []
for group_test_start in group_test_starts:
train_array = []
val_array = []
test_array = []
val_group_st = max(max_train_group_size + val_group_gap,
group_test_start - test_group_gap - max_val_group_size)
train_group_st = max(0, val_group_st - val_group_gap - max_train_group_size)
for train_group_idx in unique_groups[train_group_st:(val_group_st - val_group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(np.concatenate((train_array, train_array_tmp)), axis=None), axis=None)
train_end = train_array.size
for val_group_idx in unique_groups[val_group_st:(group_test_start - test_group_gap)]:
val_array_tmp = group_dict[val_group_idx]
val_array = np.sort(np.unique(np.concatenate((val_array, val_array_tmp)), axis=None), axis=None)
val_array = val_array[val_group_gap:]
for test_group_idx in unique_groups[group_test_start:(group_test_start + group_test_size)]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(np.concatenate((test_array, test_array_tmp)), axis=None), axis=None)
test_array = test_array[test_group_gap:]
yield [int(i) for i in train_array], [int(i) for i in val_array], [int(i) for i in test_array]
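# Usage sketch (illustrative, not part of the original module): the stacking variant
# yields train/validation/test index triples; everything below is toy data and the
# parameter values are assumptions chosen only so every fold is non-empty.
def _demo_purged_group_split_stacking():
    n_days, rows_per_day = 100, 2
    demo_groups = np.repeat(np.arange(n_days), rows_per_day)
    demo_X = np.random.randn(n_days * rows_per_day, 3)
    demo_y = np.random.randn(n_days * rows_per_day)
    cv = PurgedGroupTimeSeriesSplitStacking(
        n_splits=4, max_train_group_size=40, max_val_group_size=10,
        max_test_group_size=10, val_group_gap=2, test_group_gap=2)
    for train_idx, val_idx, test_idx in cv.split(demo_X, demo_y, groups=demo_groups):
        print(f"train={len(train_idx)} val={len(val_idx)} test={len(test_idx)}")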
def sharp_ratio(data, base_ratio=0.0):
num = len(data)
t_return = (data.shift(-1) - data) / data
std = t_return.std()
sharpratio = (t_return.mean() - base_ratio) * (np.sqrt(num)) / std
return sharpratio
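# Usage sketch (illustrative): sharp_ratio expects a price series; the random-walk
# series below is an assumption used only to show the call, not project data.
def _demo_sharp_ratio():
    demo_prices = pd.Series(np.cumprod(1.0 + np.random.normal(0.0005, 0.01, 252)))
    return sharp_ratio(demo_prices)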
class Pre_data(object):
def __init__(self):
self.funcmap = {
"种子": self.set_all_seeds,
"填充": self.pipe_pad,
            # takes one array of columns and one dataframe
"取列": self.split_columns,
"取行": self.split_rows,
}
def set_all_seeds(self, dataobj, seed):
np.random.seed(seed)
random.seed(seed)
# tf.random.set_seed(seed)
return dataobj
def pipe_pad(self, dataobj, paras={}):
if paras["值"] is None:
if paras["方式"] == "向前":
# 再向上填充
dataobj.fillna(method='bfill', inplace=True)
elif paras["方式"] == "向后":
                # first fill downward (forward fill)
dataobj.fillna(method='ffill', inplace=True)
else:
raise Exception("paras error {}".format(paras))
else:
dataobj.fillna(value=paras["值"], inplace=True)
return dataobj
def split_columns(self, dataobj, paras):
return dataobj[paras]
def split_rows(self, dataobj, paras):
if isinstance(paras[0], str):
outdata = dataobj.loc[paras[0]:]
elif isinstance(paras[0], int):
outdata = dataobj.iloc[paras[0]:]
else:
raise Exception("type error {}".format(paras))
if isinstance(paras[1], str):
outdata = outdata.loc[:paras[1]]
elif isinstance(paras[1], int):
outdata = outdata.iloc[:paras[1]]
else:
raise Exception("type error {}".format(paras))
return outdata
def __call__(self, infiles, commands):
outdata = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
# 顺序处理
for command in commands:
tkey = list(command.keys())[0]
pdobj = self.funcmap[tkey](pdobj, command[tkey])
outdata.append(pdobj)
return outdata
class Train_split(object):
def __init__(self):
self.funcmap = {
# 一个数组栏,一个dataframe
"拆分": self.split_train_test,
}
def split_train_test(self, dataobj, paras):
outlist = []
if isinstance(paras[0], str):
outlist.append(dataobj.loc[:paras[0]])
if len(paras) > 1:
outlist.append(dataobj.loc[paras[0]:paras[1]])
outlist.append(dataobj.loc[paras[1]:])
else:
outlist.append(dataobj.loc[paras[0]:])
elif isinstance(paras[0], int):
outlist.append(dataobj.iloc[:paras[0]])
if len(paras) > 1:
outlist.append(dataobj.iloc[paras[0]:paras[1]])
outlist.append(dataobj.iloc[paras[1]:])
else:
outlist.append(dataobj.iloc[paras[0]:])
elif isinstance(paras[0], float):
tsplit = len(dataobj)
tsplit1 = int(tsplit * paras[0])
outlist.append(dataobj.iloc[:tsplit1])
if len(paras) > 1:
tsplit2 = int(tsplit * sum(paras))
outlist.append(dataobj.iloc[tsplit1:tsplit2])
outlist.append(dataobj.iloc[tsplit2:])
else:
outlist.append(dataobj.iloc[tsplit1:])
else:
raise Exception("type error {}".format(paras))
return outlist
def __call__(self, infiles, commands):
outdata = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
# 顺序处理
for command in commands:
tkey = list(command.keys())[0]
pdobj = self.funcmap[tkey](pdobj, command[tkey])
outdata.append(pdobj)
return outdata
class SequenceChara(object):
def __init__(self):
self.funcmap = {
"均值n": self.mean_n,
"标准差n": self.std_n,
"涨幅比n": self.ratio_n,
"回撤n": self.draw_n,
"最涨n": self.maxrise_n,
"夏普n": self.sharp_n,
"label_最大n": self.l_max_n,
"label_最小n": self.l_min_n,
"label_回撤n": self.l_draw_n,
"label_最涨n": self.l_maxrise_n,
}
def mean_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).mean()
return outdata
def std_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).std()
return outdata
def ratio_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).apply(lambda x: x[-1] / x[0])
return outdata
def draw_n(self, dataobj, n):
pricepd = dataobj.iloc[:, 0]
maxfallret = pd.Series(index=pricepd.index)
for i in range(0, len(dataobj) - n):
tmpsec = pricepd[i + 1:i + n + 1]
tmpmax = pricepd[i]
tmpmin = pricepd[i]
tmpdrawdown = [1.0]
for t in range(0, n):
if tmpsec[t] > tmpmax:
tmpmax = tmpsec[t]
tmpdrawdown.append(tmpdrawdown[-1])
elif tmpsec[t] <= tmpmin:
tmpmin = tmpsec[t]
tmpdrawdown.append(tmpmin / tmpmax)
else:
pass
maxfallret[i] = min(tmpdrawdown)
return maxfallret
def maxrise_n(self, dataobj, n):
pricepd = dataobj.iloc[:, 0]
maxraiseret = pd.Series(index=pricepd.index)
for i in range(0, len(dataobj) - n):
tmpsec = pricepd[i + 1:i + n + 1]
tmpmax = pricepd[i]
tmpmin = pricepd[i]
tmpdrawup = [1.0]
for t in range(0, n):
if tmpsec[t] > tmpmax:
tmpmax = tmpsec[t]
tmpdrawup.append(tmpmax / tmpmin)
elif tmpsec[t] <= tmpmin:
tmpmin = tmpsec[t]
tmpdrawup.append(tmpdrawup[-1])
else:
pass
maxraiseret[i] = max(tmpdrawup)
return maxraiseret
def sharp_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).apply(sharp_ratio)
return outdata
def l_max_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).max()
outdata = outdata.shift(-n)
return outdata
def l_min_n(self, dataobj, n):
outdata = dataobj.iloc[:, 0].rolling(window=n, center=False).min()
outdata.shift(-n)
return outdata
def l_draw_n(self, dataobj, n):
outdata = self.draw_n(dataobj, n)
outdata.shift(-n)
return outdata
def l_maxrise_n(self, dataobj, n):
outdata = self.maxrise_n(dataobj, n)
outdata.shift(-n)
return outdata
def __call__(self, infiles, commands):
outdata = []
colhead = []
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
delhead = pdobj.columns[0]
colhead.append(delhead)
# 并行处理
toutd = []
for command in commands:
tkey = list(command.keys())[0]
outobj = self.funcmap[tkey](pdobj, command[tkey])
toutd.append(outobj)
outdata.append(toutd)
return outdata, colhead
class CharaExtract(object):
def __init__(self):
self.funcmap = {
"profit_avelog": self.profit_avelog,
"胜率": self.win_ratio,
"回撤": self.draw_n,
"最涨": self.rise_n,
"夏普": self.sharp_n,
}
def profit_avelog(self, dataobj):
return np.log(dataobj.iloc[-1, 0] / dataobj.iloc[0, 0]) / len(dataobj)
def win_ratio(self, dataobj):
pricepd = dataobj.diff()
pricepd = np.array(pricepd.iloc[:, 0])
posinum = len(pricepd[pricepd > 0])
allnum = len(pricepd[~np.isnan(pricepd)])
return float(posinum) / allnum
def draw_n(self, dataobj):
pricepd = dataobj.iloc[:, 0]
n = len(dataobj)
tmpsec = pricepd[0:n]
tmpmax = pricepd[0]
tmpmin = pricepd[0]
tmpdrawdown = [1.0]
for i in range(1, n):
if tmpsec[i] > tmpmax:
tmpmax = tmpsec[i]
tmpdrawdown.append(tmpdrawdown[-1])
elif tmpsec[i] <= tmpmin:
tmpmin = tmpsec[i]
tmpdrawdown.append(tmpmin / tmpmax)
else:
pass
return min(tmpdrawdown)
def rise_n(self, dataobj):
pricepd = dataobj.iloc[:, 0]
n = len(dataobj)
tmpsec = pricepd[0:n]
tmpmax = pricepd[0]
tmpmin = pricepd[0]
tmpdrawup = [1.0]
for i in range(1, n):
if tmpsec[i] > tmpmax:
tmpmax = tmpsec[i]
tmpdrawup.append(tmpmax / tmpmin)
elif tmpsec[i] <= tmpmin:
tmpmin = tmpsec[i]
tmpdrawup.append(tmpdrawup[-1])
else:
pass
return max(tmpdrawup)
def sharp_n(self, dataobj):
tsr = sharp_ratio(dataobj)
return tsr[0]
def __call__(self, infiles, commands):
outdatas = [{"filename": [], i1: []} for i1 in commands]
for i1, command in enumerate(commands):
# 并行处理
for infile in infiles:
pdobj = pd.read_csv(infile, header=0, encoding="utf8")
pdobj.set_index("date", inplace=True)
pdobj = pdobj[[pdobj.columns[0]]]
outobj = self.funcmap[command](pdobj)
ttinfile = os.path.split(infile)[1]
outdatas[i1]["filename"].append(ttinfile)
outdatas[i1][command].append(outobj)
outdatapds = []
for i1 in outdatas:
tpd = pd.DataFrame(i1)
tpd.set_index("filename", inplace=True)
outdatapds.append(tpd)
return outdatapds
class DataMerge(object):
def __init__(self):
pass
def __call__(self, oriinfiles, projectpath):
# 1. 只支持 前后统配合并,去掉前后的 *
pdobjlist, matchstrlist = regex2pairs(oriinfiles, projectpath)
outfilelist = [i1[0] + "_".join(["origin" if i2 == "" else i2 for i2 in i1[1]]) + i1[2] for i1 in matchstrlist]
outpdobjlist = [pd.concat(i1, axis=1) for i1 in pdobjlist]
return outpdobjlist, outfilelist
class DataCopy(object):
def __init__(self):
pass
def __call__(self, oriinfiles, prefix, projectpath):
infiles = [glob.glob(os.path.join(projectpath, i2)) for i2 in oriinfiles]
infiles = set(itertools.chain(*infiles)) # 展开去重
for infile in infiles:
(filepath, ofile) = os.path.split(infile)
shutil.copy(infile, os.path.join(filepath, prefix + ofile))
return None
class DataCalc(object):
def __init__(self):
self.funcmap = {
"+": self.add,
"-": self.mins,
"*": self.multi,
"/": self.divide,
"**": self.ppower,
}
self.symbolmap = {
"+": "加",
"-": "减",
"*": "乘",
"/": "除",
"**": "幂",
}
def add(self, dataobj, commandstr, float_f=None, float_b=None):
if float_b is None and float_f is None:
outdata = dataobj[0].iloc[:, 0] + dataobj[1].iloc[:, 0]
outdata = pd.DataFrame(outdata)
outdata.rename(columns={0: "_".join([dataobj[0].columns[0], commandstr, dataobj[1].columns[0]])},
inplace=True)
elif float_b is not None:
outdata = dataobj[0].iloc[:, 0] + float_b
outdata = | pd.DataFrame(outdata) | pandas.DataFrame |
import numpy as np
import imageio
import os
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
from brainio_base.stimuli import StimulusSet
class Stimulus:
def __init__(self, size_px=[448, 448], bit_depth=8,
stim_id=1000, save_dir='images', type_name='stimulus',
format_id='{0:04d}'):
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
self.type_name = type_name
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
self.size_px = size_px
self.objects = []
self.stimulus = np.ones(self.size_px, dtype=np.uint8) * self.gray
def add_object(self, stim_object):
self.objects.append(stim_object)
def build_stimulus(self):
for obj in self.objects:
self.stimulus[obj.mask] = obj.stimulus[obj.mask]
def clear_stimulus(self):
self.stimulus = np.ones(self.size, dtype=np.uint8) * self.gray
def show_stimulus(self):
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
file_name= self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
class Grating:
def __init__(self, orientation=0, phase=0, sf=2, size_px=[448, 448], width=8,
contrast=1, bit_depth=8, pos=[0, 0], rad=5, sig=0,
stim_id=1000, format_id='{0:04d}', save_dir='images', type_name='grating'):
# save directory
self.save_dir = save_dir
self.stim_id = stim_id
self.format_id = format_id
# label for type of stimulus
self.type_name = type_name
# 1 channel colors, white, black, grey
self.white = np.uint8(2**bit_depth-1)
self.black = np.uint8(0)
self.gray = np.uint8(self.white/2+1)
# pixel dimensions of the image
self.size_px = np.array(size_px)
# position of image in field of view
self.pos = np.array(pos)
# pixel to visual field degree conversion
self.px_to_deg = self.size_px[1] / width
# size of stimulus in visual field in degrees
self.size = self.size_px / self.px_to_deg
# orientation in radians
self.orientation = orientation / 180 * np.pi
# phase of the grating
self.phase = phase / 180 * np.pi
# spatial frequency of the grating
self.sf = sf
# contrast of the grating
self.contrast = contrast
# make self.xv and self.yv store the degree positions of all pixels in the image
self.xv = np.zeros(size_px)
self.yv = np.zeros(size_px)
self.update_frame()
self.mask = np.ones(size_px, dtype=bool)
self.set_circ_mask(rad=rad)
self.tex = np.zeros(size_px)
self.stimulus = np.ones(size_px, dtype=np.uint8) * self.gray
self.envelope = np.ones(size_px)
if sig is 0:
self.update_tex()
else:
self.set_gaussian_envelope(sig)
def update_frame(self):
x = (np.arange(self.size_px[1]) - self.size_px[1]/2) / self.px_to_deg - self.pos[1]
y = (np.arange(self.size_px[0]) - self.size_px[0]/2) / self.px_to_deg - self.pos[0]
# all possible degree coordinates in matrices of points
self.xv, self.yv = np.meshgrid(x, y)
def update_tex(self):
# make the grating pattern
self.tex = (np.sin((self.xv * np.cos(self.orientation) + self.yv * np.sin(self.orientation)) *
self.sf * 2 * np.pi + self.phase) * self.contrast * self.envelope)
def update_stimulus(self):
self.stimulus[self.mask] = np.uint8(((self.tex[self.mask]+1)/2)*self.white)
self.stimulus[np.logical_not(self.mask)] = self.gray
def set_circ_mask(self, rad):
# apply operation to put a 1 for all points inclusively within the degree radius and a 0 outside it
self.mask = self.xv**2 + self.yv**2 <= rad ** 2
# same as circular mask but for an annulus
def set_annular_mask(self, inner_rad, outer_rad):
self.mask = (self.xv ** 2 + self.yv ** 2 <= outer_rad ** 2) * \
(self.xv ** 2 + self.yv ** 2 > inner_rad ** 2)
def set_gaussian_envelope(self, sig):
d = np.sqrt(self.xv**2 + self.yv**2)
self.envelope = np.exp(-d**2/(2 * sig**2))
self.update_tex()
def show_stimulus(self):
# pyplot stuff
self.update_stimulus()
my_dpi = 192
fig = plt.figure()
fig.set_size_inches(self.size_px[1] / my_dpi, self.size_px[0] / my_dpi, forward=False)
ax = plt.axes([0, 0, 1, 1])
ax.set_axis_off()
fig.add_axes(ax)
ax.imshow(self.stimulus, cmap='gray')
plt.show()
def save_stimulus(self):
# save to correct (previously specified) directory
self.update_stimulus()
file_name = self.type_name + '_' + self.format_id.format(self.stim_id) + '.png'
imageio.imwrite(self.save_dir + os.sep + file_name, self.stimulus)
return file_name
def load_stim_info(stim_name, data_dir):
stim = pd.read_csv(os.path.join(data_dir, 'stimulus_set'), dtype={'image_id': str})
image_paths = dict((key, value) for (key, value) in zip(stim['image_id'].values,
[os.path.join(data_dir, image_name) for image_name
in stim['image_file_name'].values]))
stim_set = StimulusSet(stim[stim.columns[:-1]])
stim_set.image_paths = image_paths
stim_set.identifier = stim_name
return stim_set
def gen_blank_stim(degrees, size_px, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
stim = Stimulus(size_px=[size_px, size_px], type_name='blank_stim', save_dir=save_dir, stim_id=0)
stimuli = pd.DataFrame({'image_id': str(0), 'degrees': [degrees]})
image_names = (stim.save_stimulus())
stimuli['image_file_name'] = pd.Series(image_names)
stimuli['image_current_local_file_path'] = pd.Series(save_dir + os.sep + image_names)
stimuli.to_csv(save_dir + os.sep + 'stimulus_set', index=False)
def gen_grating_stim(degrees, size_px, stim_name, grat_params, save_dir):
if not (os.path.isdir(save_dir)):
os.mkdir(save_dir)
width = degrees
nStim = grat_params.shape[0]
print('Generating stimulus: #', nStim)
stimuli = pd.DataFrame({'image_id': [str(n) for n in range(nStim)], 'degrees': [width] * nStim})
image_names = nStim * [None]
image_local_file_path = nStim * [None]
all_y = nStim * [None]
all_x = nStim * [None]
all_c = nStim * [None]
all_r = nStim * [None]
all_s = nStim * [None]
all_o = nStim * [None]
all_p = nStim * [None]
for i in np.arange(nStim):
stim_id = np.uint64(grat_params[i, 0] * 10e9 + grat_params[i, 1] * 10e7 + grat_params[i, 3] * 10e5 +
grat_params[i, 4] * 10e3 + grat_params[i, 5] * 10e1 + grat_params[i, 6])
grat = Grating(width=width, pos=[grat_params[i, 0], grat_params[i, 1]], contrast=grat_params[i, 2],
rad=grat_params[i, 3], sf=grat_params[i, 4], orientation=grat_params[i, 5],
phase=grat_params[i, 6], stim_id= stim_id, format_id='{0:012d}', save_dir=save_dir,
size_px=[size_px, size_px], type_name=stim_name)
image_names[i] = (grat.save_stimulus())
image_local_file_path[i] = save_dir + os.sep + image_names[i]
all_y[i] = grat_params[i, 0]
all_x[i] = grat_params[i, 1]
all_c[i] = grat_params[i, 2]
all_r[i] = grat_params[i, 3]
all_s[i] = grat_params[i, 4]
all_o[i] = grat_params[i, 5]
all_p[i] = grat_params[i, 6]
stimuli['position_y'] = pd.Series(all_y)
stimuli['position_x'] = pd.Series(all_x)
stimuli['contrast'] = | pd.Series(all_c) | pandas.Series |
import pandas as pd
path='C:/Users/alightner/Documents/Source_Updates/ESDB/Database/'
import country_converter as coco
cc = coco.CountryConverter()
def transform_to_codes(data, col, new_col_name, name_type='ISO3'):
data.replace('Congo, Dem. Rep.', 'DR Congo', inplace=True)
data[col_name] = cc.convert(names =list(data[col]), to='ISO3', not_found=None)
return data
def merge_country_name(data, left_on='country_id', right_on='country_id',country_name=['country_name'], file=path):
# read excel file, select vars of interest [[ ]]
df_countries = pd.read_sas(file+'countries.sas7bdat')
# decode string vars from sas
for i in country_name:
df_countries[i] = df_countries[i].str.decode('UTF-8')
# merge data on column of choice
df = pd.merge(data, df_countries[['country_id']+ country_name], left_on=left_on, right_on=right_on, how='left')
# print the names which do not merge
print(df[~df[right_on].notnull()][left_on].unique())
return df
def merge_country_class(data, class_type='World Bank Income'):
'''Provide data, the variable you would like to merge on, and the type of income
category the user would like to examine. Need to expand to other types of income groups.'''
# set to shared file location in the future file =
file1 = 'C:/Users/alightner/Documents/Source Updates/Gen Data'
# bring in relevant data sources
codes = pd.read_sas(file1 + '/country_classification_values.sas7bdat')
values = pd.read_sas(file1 + '/classification_values.sas7bdat')
# change classification value name in values to UTU-08
values['classification_value_name'] = values['classification_value_name'].str.decode('UTF-8')
# if class == 'World Bank Income' then just merge these codes
if class_type =='World Bank Income':
# keep only the WB codes (first 4 observations)
values = values.iloc[0:4, :]
# keep only the country code values where classif.. is bewteen 0 and 4.
codes = codes[codes['classification_value_id'].between(0,4)]
# merge codes to dataset provided.
classif = pd.merge(codes, values, on='classification_value_id', how='left')
# rename class_year to year to limit repetitiveness
classif.rename(index=str, columns={"classification_year": "year"}, inplace=True)
# select only the max year
max_year = max(list(classif['year'].unique()))
# select the most recent year
classif = classif[classif['year']==max_year]
# drop year
classif.drop('year', axis=1, inplace=True)
# merge datasets
df = pd.merge(data, classif, on=['country_id'], how='left')
return df
def merge_series_names(data, include_vars=['series_name'], file='C:/Users/alightner/Documents/Source Updates/029 ILO/'):
# read excel file, select vars of interest [[ ]]
df_series = | pd.read_excel(file+'Mappings/mapping series.xlsx') | pandas.read_excel |
## Online battery validation
import os
import glob
import pandas as pd
import numpy as np
import pickle
class BESS(object):
def __init__(self, max_energy, max_power, init_soc_proc, efficiency):
self.soc = init_soc_proc
self.max_e_capacity = max_energy
self.efficiency = efficiency
self.energy = self.max_e_capacity * (self.soc)/100
self.power = max_power
def calculate_NLF(self, net_load_day):
""" Net load factor
"""
df = pd.DataFrame(net_load_day).abs()
NLF = df.mean()/df.max()
return NLF[0]
def calculate_SBSPM(self, NR, LE, UE, error=0.01):
"""
Calculates second by second Service Performance Measure (SBSPM)
"""
if (NR >= LE - error) and (NR <= UE + error):
SBSPM = 1
elif (NR > UE + error):
SBSPM = max([1-abs(NR - UE), 0])
elif (NR < LE - error):
SBSPM = max([1-abs(NR - LE), 0])
else:
raise ValueError('The NR is undefined {}'.format(NR))
return SBSPM
def average_SPM_over_SP(self, SBSPM_list):
"""
Averages SPM over Settlement period
"""
SPM = sum(SBSPM_list)/1800
return SPM
def check_availability(self, SPM):
"""
Returns availability factor
"""
if SPM >= 0.95:
availability_factor = 1
elif (SPM >= 0.75) and (SPM < 0.95):
availability_factor = 0.75
elif (SPM >= 0.5) and (SPM < 0.75):
availability_factor = 0.5
elif (SPM < 0.5):
availability_factor = 0
return availability_factor
def save_to_pickle(name, list_to_save, save_path):
with open(os.path.join(save_path, '{}.pkl'.format(name)), 'wb') as f:
pickle.dump(list_to_save, f)
return
def load_from_pickle(name, save_path):
with open(os.path.join(save_path, '{}.pkl'.format(name)), 'rb') as f:
p = pickle.load(f)
return p
import os
import pandas as pd
path = "."
bess_name = "sonnen"
apath = os.path.join(path, 'simulations_{}'.format(bess_name), '{}'.format(1),'agent_{}.csv'.format(1))
nl = pd.read_csv(apath).loc[:,['nl_a{}'.format(1)]]
pb = pd.read_csv(apath).loc[:,['pb_a{}'.format(1)]]
c_reg = | pd.read_csv(apath) | pandas.read_csv |
from src.sql_table import DummySqlDB
import pandas as pd
import pytest
@pytest.fixture()
def mocking_session():
class SessionMocker():
def __init__(self):
pass
def execute(self, *args):
if args:
return [arg for arg in args]
def commit(self):
print('committed')
def close(self):
print('closed')
yield SessionMocker
class TestDummyDB():
@pytest.fixture()
# This fixture will only be available within the scope of TestGroup
def mock(self, mocker):
mocker.patch('src.sql_table.DummySqlDB._create_engine').return_value = 'test_string'
mocker.patch('src.sql_table.DummySqlDB.query_sql').return_value = | pd.DataFrame({'user': ['test']}) | pandas.DataFrame |
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
| tm.close() | pandas.util.testing.close |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
from decimal import Decimal
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from databricks import koalas
from databricks.koalas.testing.utils import ReusedSQLTestCase
class ReshapeTest(ReusedSQLTestCase):
def test_get_dummies(self):
for data in [pd.Series([1, 1, 1, 2, 2, 1, 3, 4]),
# pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category'),
# pd.Series(pd.Categorical([1, 1, 1, 2, 2, 1, 3, 4], categories=[4, 3, 2, 1])),
pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
# 'b': pd.Categorical(list('abcdabcd')),
'b': list('abcdabcd')})]:
exp = pd.get_dummies(data)
ddata = koalas.from_pandas(data)
res = koalas.get_dummies(ddata)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_object(self):
df = pd.DataFrame({'a': [1, 2, 3, 4, 4, 3, 2, 1],
# 'a': pd.Categorical([1, 2, 3, 4, 4, 3, 2, 1]),
'b': list('abcdabcd'),
# 'c': pd.Categorical(list('abcdabcd')),
'c': list('abcdabcd')})
ddf = koalas.from_pandas(df)
# Explicitly exclude object columns
exp = pd.get_dummies(df, columns=['a', 'c'])
res = koalas.get_dummies(ddf, columns=['a', 'c'])
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.b)
res = koalas.get_dummies(ddf.b)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df, columns=['b'])
res = koalas.get_dummies(ddf, columns=['b'])
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_date_datetime(self):
df = pd.DataFrame({'d': [datetime.date(2019, 1, 1),
datetime.date(2019, 1, 2),
datetime.date(2019, 1, 1)],
'dt': [datetime.datetime(2019, 1, 1, 0, 0, 0),
datetime.datetime(2019, 1, 1, 0, 0, 1),
datetime.datetime(2019, 1, 1, 0, 0, 0)]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.d)
res = koalas.get_dummies(ddf.d)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.dt)
res = koalas.get_dummies(ddf.dt)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_boolean(self):
df = pd.DataFrame({'b': [True, False, True]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.b)
res = koalas.get_dummies(ddf.b)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_decimal(self):
df = pd.DataFrame({'d': [Decimal(1.0), Decimal(2.0), Decimal(1)]})
ddf = koalas.from_pandas(df)
exp = pd.get_dummies(df)
res = koalas.get_dummies(ddf)
self.assertPandasAlmostEqual(res.toPandas(), exp)
exp = pd.get_dummies(df.d)
res = koalas.get_dummies(ddf.d)
self.assertPandasAlmostEqual(res.toPandas(), exp)
def test_get_dummies_kwargs(self):
# s = pd.Series([1, 1, 1, 2, 2, 1, 3, 4], dtype='category')
s = | pd.Series([1, 1, 1, 2, 2, 1, 3, 4]) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: silviapagliarini
"""
import os
import numpy as np
import xlsxwriter
import pandas as pd
import csv
def modal(babies, judges_list_name, args):
"""
For a given recording, compute the modal value across listeners.
Output:
For each baby, a table containing:
- the vocalization list (start and end times)
- lena label for the vocalization
- the prominences of all the judges for each vocalization
- the modal value for each vocalization
- the label chosen depending on the modal value as the "average label"
- Lena-like file containing all the labels (re-labeled infants and others)
"""
for b in range(0, len(babies)):
print(babies[b])
# Prepare how many vocalizations in the recording
n_test_table = pd.read_csv(args.data_dir + '/' + babies[b] + '_scrubbed_CHNrelabel_' + judges_list_name[1] + '_1.csv')
n_test = len(n_test_table["startSeconds"])
n_test_start = n_test_table["startSeconds"]
n_test_end = n_test_table["endSeconds"]
# Lena labels
lena = pd.read_csv(args.data_dir + '/' + babies[b] + '_segments.csv')
lena_labels = lena["segtype"]
lena_startsec = lena["startsec"]
lena_endsec = lena["endsec"]
CHNSP_pos = np.where(lena_labels == 'CHNSP')[0]
CHNNSP_pos = np.where(lena_labels == 'CHNNSP')[0]
pos = np.append(CHNSP_pos, CHNNSP_pos)
pos = sorted(pos)
# Prominence assigned by the listeners
prominence = np.zeros((len(judges_list_name), n_test))
for j in range(0, len(judges_list_name)):
human_table = pd.read_csv(args.data_dir + '/' + babies[b] + '_scrubbed_CHNrelabel_' + judges_list_name[j] + '_1.csv')
human = | pd.DataFrame.to_numpy(human_table) | pandas.DataFrame.to_numpy |
import pandas as pd
import numpy as np
from pathlib import Path
from sklearn.utils.extmath import cartesian
from itertools import product
from sklearn import preprocessing
import gc
class contest(object):
__preferredColumnOrder = ['item_id','shop_id','date_block_num','quarter','half','year','item_category_id','new_item','new_shop_item',
'mode_item_price_month','min_item_price_month','max_item_price_month','mean_item_price_month',
'mean_item_category_price_month','min_item_category_price_month','max_item_category_price_month', 'mode_item_category_price_month']
def __init__(self, trainDataFile, testDataFile, itemDataFile, categoryDataFile):
#validate that files were passed in and exist at location provided by caller
if (not trainDataFile) | (not testDataFile) | (not itemDataFile) | (not categoryDataFile):
raise RuntimeError('file locations must be provided for train, test, items, and category data.')
for i,x in [[trainDataFile,'Train'], [testDataFile,'Test'], [itemDataFile, 'Item'], [categoryDataFile, 'Category']]:
i = str(i).replace('\\','/').strip()
if not Path(i).is_file():
raise RuntimeError('%s data file speicified [{%s}] does not exist.' % (x, i))
if x == 'Train':
self.__orig_trainDataFile = i
elif x == 'Test':
self.__orig_testDataFile = i
elif x == 'Item':
self.__orig_itemDataFile = i
else:
self.__orig_categoryDataFile = i
self.__out_trainDataFile = self.__outputFile(self.__orig_trainDataFile, 'pp_data_')
self.__out_trainLabelsFile = self.__outputFile(self.__orig_trainDataFile, 'pp_labels_')
self.__out_testDataFile = self.__outputFile(self.__orig_testDataFile, 'pp_')
self.__out_validateTrainDataFile = self.__outputFile(self.__orig_trainDataFile, 'val_train_data_')
self.__out_validateTrainLabelsFile = self.__outputFile(self.__orig_trainDataFile, 'val_train_labels_')
self.__out_validateTestDataFile = self.__outputFile(self.__orig_trainDataFile, 'val_test_data_')
self.__out_validateTestLabelsFile = self.__outputFile(self.__orig_trainDataFile, 'val_test_labels_')
def __outputFile(self, inFile, prefix):
x = inFile.split('/')
x[len(x) - 1] = prefix + x[len(x) - 1]
x = "/".join(x)
return x
def __downcast(self, df):
#reduce all float and int 64 values down to 32-bit to save memory
floats = [c for c in df if df[c].dtype == 'float64']
ints = [c for c in df if df[c].dtype == 'int64']
df[floats] = df[floats].astype(np.float32)
df[ints] = df[ints].astype(np.int32)
return df
def __openFilePrepared(self, fileName):
#open all files with no pre-specified index; downcast numeric data from 64 to 32-bit
df = pd.read_csv(fileName, index_col=False)
df = self.__downcast(df)
return df
def __getUniqueShopItems(self, train):
unique_shop_items = train[['shop_id','item_id']].drop_duplicates(keep='first')
x = pd.DataFrame(unique_shop_items.groupby(['item_id']).agg({'shop_id':'count'}).rename(columns={'shop_id':'is_unique_to_shop_item'})).reset_index()
x = x[(x.is_unique_to_shop_item == 1)]
unique_shop_items = unique_shop_items.set_index(['item_id'])
unique_shop_items = unique_shop_items.merge(x, left_index=True, right_on=['item_id'], how='left').fillna(0)
unique_shop_items.is_unique_to_shop_item = np.int8(unique_shop_items.is_unique_to_shop_item)
unique_shop_items.shop_id = np.int32(unique_shop_items.shop_id)
unique_shop_items.item_id = np.int32(unique_shop_items.item_id)
return unique_shop_items
def __aggregateTrainByMonth(self, train, items, categories, verbose=False):
#clean and aggregate training data
if verbose:
print("\tCleaning known training errors and duplicates...")
### CLEAN KNOWN ISSUES ###
#a negative price exists in the train set; fix it first before aggregation
train.loc[(train.item_price == -1), 'item_price'] = 1249.0
#there is also a 'giant' item price out there; a singular value of 307,960
#it is currently a product called Radmin 3 and item 6066 is a multiple of another product, 6065 (522x vs. 1x)
#item 6066 never happens in the test set, but the train set has it. Let's get rid of 6066 and change it with 6065
train.loc[(train.item_id == 6066), ['item_price','item_id','item_cnt_day']] = [1299.0, 6065, 1]
#drop duplicates
train.date = pd.to_datetime(train.date, format='%d.%m.%Y', errors='coerce')
train = train.drop_duplicates(keep='first')
if verbose:
print("\tAggregating item quantities on a per item + shop + date block basis...")
train = pd.DataFrame(train.groupby(['item_id','shop_id','date_block_num'],
as_index=False).agg({'item_cnt_day':['sum','count','std']})).fillna(0.0)
train.columns = ['item_id','shop_id','date_block_num','item_cnt_month','customer_sales','item_cnt_month_std']
train.item_cnt_month = np.clip(train.item_cnt_month, 0, 20)
train.loc[(train.item_cnt_month == 0) & (train.customer_sales > 0), 'customer_sales'] = 0
if verbose:
print("\tCreating cartesian product of all shops and items on a per date block basis to simulate TEST dataset...")
#create cartesian products of shops x items on a per month basis
train_temp = []
for i in range(train.date_block_num.min(), (train.date_block_num.max() + 1), 1):
date_slice = train[(train.date_block_num == i)]
train_temp.append(np.array(cartesian((date_slice.item_id.unique(), date_slice.shop_id.unique(), [i]))))
train_temp = pd.DataFrame(np.vstack(train_temp), columns = ['item_id','shop_id', 'date_block_num'], dtype=np.int32)
train = pd.merge(train_temp, train, on=['item_id','shop_id','date_block_num'], how='left').fillna(0.0)
if verbose:
print("\tcalculating shop-only and item-only total sales by month...")
shoponly_sales = pd.DataFrame(train.groupby(['date_block_num','shop_id'])['item_cnt_month'].transform('sum')).rename(columns={'item_cnt_month':'item_cnt_month_shoponly'})
itemonly_sales = pd.DataFrame(train.groupby(['date_block_num','item_id'])['item_cnt_month'].transform('sum')).rename(columns={'item_cnt_month':'item_cnt_month_itemonly'})
train = train.merge(shoponly_sales, left_index=True, right_index=True, how='inner')
train = train.merge(itemonly_sales, left_index=True, right_index=True, how='inner')
if verbose:
print("\tcalculating shop-only and item-only STD sales by month...")
shoponly_sales = pd.DataFrame(train.groupby(['date_block_num','shop_id'])['item_cnt_month'].transform('std')).rename(columns={'item_cnt_month':'item_cnt_month_std_shoponly'})
itemonly_sales = pd.DataFrame(train.groupby(['date_block_num','item_id'])['item_cnt_month'].transform('std')).rename(columns={'item_cnt_month':'item_cnt_month_std_itemonly'})
train = train.merge(shoponly_sales, left_index=True, right_index=True, how='inner')
train = train.merge(itemonly_sales, left_index=True, right_index=True, how='inner')
del shoponly_sales, itemonly_sales
gc.collect()
if verbose:
print("\tAdding item categories and super categories...")
#add item category and item super category to train
train = train.merge(items[['item_id','item_category_id']], on=['item_id'], how='left')
train = train.merge(categories[['item_category_id','super_category_id']], on=['item_category_id'], how='left')
if verbose:
print("\tCalculating cateogory and super-category quantities per month...")
#get prices by shop + item + month + category
avg_qty = pd.DataFrame(train.groupby(['item_category_id','shop_id','date_block_num'],
as_index=False).agg({'item_cnt_month':'sum'})).fillna(0.0)
avg_qty.columns = ['item_category_id','shop_id','date_block_num','item_category_cnt_month']
train = train.merge(avg_qty, on=['item_category_id','shop_id','date_block_num'], how='left').fillna(0.0)
avg_qty = pd.DataFrame(train.groupby(['super_category_id','shop_id','date_block_num'],
as_index=False).agg({'item_cnt_month':'sum'})).fillna(0.0)
avg_qty.columns = ['super_category_id','shop_id','date_block_num','super_category_cnt_month']
train = train.merge(avg_qty, on=['super_category_id','shop_id','date_block_num'], how='left').fillna(0.0)
if verbose:
print("\tCleaning up temporary objects...")
del avg_qty, train_temp
gc.collect()
train = self.__downcast(train)
return train
def __orderTrainTest(self, train, test, cols):
train = train[cols]
test = test[cols]
return train, test
def __populateTestItemCategories(self, test, items, categories, verbose=False):
#populate item prices for the test set based on previous item + shop combos in the dataset
#additionally, add item category to the end
if verbose:
print("\tadding category and super category to TEST data...")
#add item category and item super category to train
test = test.merge(items[['item_id','item_category_id']], on=['item_id'], how='left')
test = test.merge(categories[['item_category_id','super_category_id']], on=['item_category_id'], how='left')
test = test.set_index('tuple_id')
test['date_block_num'] = np.int32(34)
test = self.__downcast(test)
return test
def __massParallelPeriodShift (self, train, test, pp_range = [1,2,3,6,12,24], shift_cols = ['item_cnt_month'], encode=False, encode_type='mean', verbose=False, clipping=False):
#iterate through a mass list of columns to get parallel period shift values
z_iter = np.array(list(product(*[pp_range, shift_cols])))
drop_labs = list(train.columns.difference(test.columns))
test = test.reset_index()
drop_labs.append('is_train')
train['is_train'] = np.int8(1)
test['is_train'] = np.int8(0)
mrg = | pd.concat([train,test],axis=0) | pandas.concat |
import pandas as pd
import glob
data_path = 'E:/GenderClassification/PycharmProjects/GenderClassification/home/abeer/Dropbox/Dataset_HAR project/*'
addrs = glob.glob(data_path)
for i in addrs:
folders = glob.glob(i + '/Walk/Esphalt/Alone/*')
for j in folders:
csv_files = glob.glob(j + '/*')
LUA = pd.read_csv('initAcc.csv')
RC = pd.read_csv('initAcc.csv')
LC = pd.read_csv('initAcc.csv')
back = pd.read_csv('initAcc.csv')
waist = pd.read_csv('initAcc.csv')
RUA = pd.read_csv('initAcc.csv')
LeftWatch = pd.read_csv('initAcc.csv')
RightWatch = pd.read_csv('initAcc.csv')
for k in csv_files:
if '(1)' in k or '(2)' in k or '(3)' in k or '(4)' in k or '(5)' in k:
continue
elif 'Accelerometer' in k and 'F5-RC' in k:
file = pd.read_csv(k)
RC = RC.append(file.iloc[:, 3:])
RC = RC.reset_index(drop=True)
print(RC.columns)
elif 'Accelerometer' in k and "DE-Waist" in k:
file = pd.read_csv(k)
waist = waist.append(file.iloc[:, 3:])
waist = waist.reset_index(drop=True)
elif 'Accelerometer' in k and "D5-LC" in k:
file = pd.read_csv(k)
LC = LC.append(file.iloc[:, 3:])
LC = LC.reset_index(drop=True)
elif 'Accelerometer' in k and "D2-RUA" in k:
file = pd.read_csv(k)
RUA = RUA.append(file.iloc[:, 3:])
RUA = RUA.reset_index(drop=True)
elif 'Accelerometer' in k and "C6-back" in k:
file = pd.read_csv(k)
back = back.append(file.iloc[:, 3:])
back = back.reset_index(drop=True)
elif 'Accelerometer' in k and "C5-LUA" in k:
file = pd.read_csv(k)
LUA = LUA.append(file.iloc[:, 3:])
LUA = LUA.reset_index(drop=True)
for k in csv_files:
if '(1)' in k or '(2)' in k or '(3)' in k or '(4)' in k or '(5)' in k:
continue
elif 'Gyroscope' in k and 'F5-RC' in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
RC = pd.concat([RC, file], axis=1)
print(RC.columns)
print(RC.info())
elif 'Gyroscope' in k and "DE-Waist" in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
waist = pd.concat([waist, file], axis=1)
elif 'Gyroscope' in k and "D5-LC" in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
LC = pd.concat([LC, file], axis=1)
elif 'Gyroscope' in k and "D2-RUA" in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
RUA = pd.concat([RUA, file], axis=1)
elif 'Gyroscope' in k and "C6-back" in k:
file = pd.read_csv(k)
file = file.iloc[:, 3:]
back = | pd.concat([back, file], axis=1) | pandas.concat |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
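# Shared settings: output folder for scraped data and a cap on listing pages.
# (Assumption: these are consumed by the save()/upload() helpers defined elsewhere in this script.)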
def stripID(url, wantStrip):
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
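# Illustrative use (hypothetical URL): stripID("https://example.com/item?id=123", "id=") -> "123"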
def Kklee():
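    """Crawl the kklee web shop (shop_id 13): walk the product listing pages and collect
    each item's title, link, id, image and prices into a DataFrame, then save() and upload() it."""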
shop_id = 13
name = 'kklee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # 暫存當頁資料,換頁時即整併到dfAll
dfAll = pd.DataFrame() # 存放所有資料
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page does not exist (nothing is found), break out of the pagination loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")  # drop the "/products/" prefix so only the product slug remains
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
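    """Crawl the wishbykorea web shop (shop_id 14): walk the collection-727 pages and collect
    each item's title, link, id, image and price into a DataFrame, then save() and upload() it."""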
shop_id = 14
name = 'wishbykorea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = pd.DataFrame() # 暫存當頁資料,換頁時即整併到dfAll
dfAll = pd.DataFrame() # 存放所有資料
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the requested page does not exist (nothing is found), break out of the pagination loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
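    """Crawl the aspeed web shop (shop_id 15): walk the product listing pages (72 items per page)
    and collect title, link, id, image and prices per item, then save() and upload() the result."""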
shop_id = 15
name = 'aspeed'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the requested page does not exist (nothing is found), break out of the pagination loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")  # drop the "/products/" prefix so only the product slug remains
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
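    """Crawl the openlady web shop (shop_id 17): walk the item listing pages and collect
    title, link, id, image and prices per item, then save() and upload() the result."""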
shop_id = 17
name = 'openlady'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the requested page does not exist (nothing is found), break out of the pagination loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
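    """Crawl the azoom web shop at aroom1988.com (shop_id 20): walk the view-all category pages
    and collect title, link, id, image and price per item, then save() and upload() the result."""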
shop_id = 20
name = 'azoom'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the requested page does not exist (nothing is found), break out of the pagination loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
        while(i < 25):  # the listing URL requests limit=24 items per page, indices 1..24
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")  # drop the "/products/" prefix so only the product slug remains
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
                if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
                if(i == 25):
p += 1
continue
i += 1
            if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
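    """Crawl Roxy Taiwan's new-collection pages (shop_id 21): collect title, link, id,
    image and prices per item, then save() and upload() the result."""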
shop_id = 21
name = 'roxy'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the requested page does not exist (nothing is found), break out of the pagination loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
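    """Crawl the shaxi web shop (shop_id 22): walk the product listing pages and collect
    title, link, id, image and prices per item, then save() and upload() the result."""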
shop_id = 22
name = 'shaxi'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
                page_id = page_id.replace("/products/", "")  # drop the "/products/" prefix so only the product slug remains
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
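    """Crawl the cici web shop at cici2.tw (shop_id 23): walk the product listing pages and
    collect title, link, id, image and prices per item, then save() and upload() the result."""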
shop_id = 23
name = 'cici'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates every scraped row
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
shop_id = 25
name = 'amesour'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
shop_id = 27
name = 'singular'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
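        # this shop paginates with limit/offset query params: 50 items per page, offset = (p-1)*50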
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
                title = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
                sale_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
                sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
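            # products load lazily while scrolling, so page down and pause briefly before reading the next card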
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
shop_id = 28
name = 'folie'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
shop_id = 29
name = 'corban'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
                pic_link = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]//img" % (i,)).get_attribute('src')
                sale_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
                sale_price = sale_price.strip('NT$ ')
                ori_price = chrome.find_element_by_xpath(
                    "//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
shop_id = 30
name = 'gmorning'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
shop_id = 31
name = 'july'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.july2017.co/products?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Per():
shop_id = 32
name = 'per'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.perdot.com.tw/categories/all?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cereal():
shop_id = 33
name = 'cereal'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cerealoutfit.com/new/page/" + str(p) + "/"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
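        # close the popup overlay (mfp-close) if it appears so it does not cover the listing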
try:
chrome.find_element_by_xpath(
"//button[@class='mfp-close']").click()
except:
pass
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/h3/a" % (i,)).text
if(title == ""):
i += 1
if(i == 25):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@data-loop='%i']" % (i,)).get_attribute('126-id')
pic_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//ins//bdi" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//del//bdi" % (i,)).text
ori_price = ori_price.rstrip(' NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[2]//span[@class='woocommerce-Price-amount amount']" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Jcjc():
shop_id = 35
name = 'jcjc'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.jcjc-dailywear.com/collections/in-stock?limit=24&page=" + \
str(p) + "&sort=featured"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a[1][@href]" % (i,)).get_attribute('href')
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/span/a/img" % (i,)).get_attribute('src')
page_id = pic_link[pic_link.find("i/")+2:pic_link.find(".j")]
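                # note: page_id above is taken from the image filename (the part between 'i/' and '.j'), as the listing exposes no explicit product id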
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/s/span" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Ccshop():
shop_id = 36
name = 'ccshop'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.ccjshop.com/products?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Iris():
shop_id = 37
name = 'iris'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.irisgarden.com.tw/products?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='boxify-item product-item ng-isolate-scope'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nook():
shop_id = 39
name = 'nook'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.nooknook.me/products?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Greenpea():
shop_id = 40
name = 'greenpea'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.greenpea-tw.com/products?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
                    i += 1
                    if(i == 25):
                        p += 1
                    continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Queen():
shop_id = 42
name = 'queen'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.queenshop.com.tw/zh-TW/QueenShop/ProductList?item1=01&item2=all&Page=" + \
str(p) + "&View=4"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "SaleID=")
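                # stripID (helper defined earlier) presumably returns the substring after 'SaleID=' in the product URL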
pic_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/img[1]" % (i,)).get_attribute('data-src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cozyfee():
shop_id = 48
name = 'cozyfee'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cozyfee.com/product.php?page=" + \
str(p) + "&cid=55#prod_list"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div[1]/a/img[1]" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[3]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Reishop():
shop_id = 49
name = 'reishop'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.reishop.com.tw/pdlist2.asp?item1=all&item2=&item3=&keyword=&ob=A&pagex=&pageno=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 31):
try:
title = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
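                # the id sits in the query string as 'yano=YA<id>&color=...'; the prefix and the color parameter are stripped below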
page_id = make_id.query
page_id = page_id.lstrip("yano=YA")
page_id = page_id.replace("&color=", "")
pic_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span/img[1]" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 31):
p += 1
continue
i += 1
if(i == 31):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Yourz():
shop_id = 50
name = 'yourz'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.yourz.com.tw/product/category/34/1/" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 13):
try:
title = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
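                # prices are rendered like 'VIP價:NT$ ...元' (VIP price); the label and the trailing '元' suffix are stripped below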
pic_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/font" % (i,)).text
sale_price = sale_price.replace('VIP價:NT$ ', '')
sale_price = sale_price.rstrip('元')
ori_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/br" % (i,)).text
ori_price = ori_price.replace('NT$ ', '')
ori_price = ori_price.rstrip('元')
except:
i += 1
if(i == 13):
p += 1
continue
i += 1
if(i == 13):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Seoulmate():
shop_id = 54
name = 'seoulmate'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.seoulmate.com.tw/catalog.php?m=115&s=249&t=0&sort=&page=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 33):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/p[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul/li[%i]/p[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=115&s=249&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//ul/li[%i]/a/img[1]" % (i,)).get_attribute('src')
if(pic_link == ""):
i += 1
if(i == 33):
p += 1
continue
except:
i += 1
if(i == 33):
p += 1
continue
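            # when an item is discounted, the <p> holds both prices: the <del> is the original and the text after the second 'NT.' is the sale price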
try:
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
                sale_price = sale_price.strip('NT.')
locate = sale_price.find("NT.")
sale_price = sale_price[locate+3:len(sale_price)]
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 33):
p += 1
continue
i += 1
if(i == 33):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sweesa():
shop_id = 55
name = 'sweesa'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sweesa.com/Shop/itemList.aspx?&m=20&o=5&sa=1&smfp=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 45):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=20", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('TWD.')
ori_price = ""
except:
i += 1
if(i == 45):
p += 1
continue
i += 1
if(i == 45):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pazzo():
shop_id = 56
name = 'pazzo'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pazzo.com.tw/recent?P=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("c=")
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div[@class='item__images']/a/picture/img[@class='img-fluid']" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Meierq():
shop_id = 57
name = 'meierq'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
page = 0
prefix_urls = [
"https://www.meierq.com/zh-tw/category/bottomclothing?P=",
"https://www.meierq.com/zh-tw/category/jewelry?P=",
"https://www.meierq.com/zh-tw/category/outerclothing?P=",
"https://www.meierq.com/zh-tw/category/accessories?P=",
]
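    # crawl Meierq one category prefix at a time; each prefix URL is paged with ?P=<n> up to page_Max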
for prefix in prefix_urls:
page += 1
for i in range(1, page_Max):
url = f"{prefix}{i}"
try:
print(url)
chrome.get(url)
chrome.find_element_by_xpath("//div[@class='items__image']")
except:
print("find_element_by_xpath_break", page)
if(page == 4):
chrome.quit()
print("break")
break
break
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div/p/a" % (i,)).text
except:
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div/p/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "n/")
page_id = page_id[:page_id.find("?c")]
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div/img" % (i,)).get_attribute('src')
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Harper():
shop_id = 58
name = 'harper'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
while True:
url = "https://www.harper.com.tw/Shop/itemList.aspx?&m=13&smfp=" + \
str(p)
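        # hard-stop after 20 pages for this shop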
if(p > 20):
chrome.quit()
break
try:
chrome.get(url)
except:
chrome.quit()
break
i = 1
while(i < 80):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
p += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
page_id = stripID(page_link, "cno=")
page_id = page_id.replace("&m=13", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 79):
p += 1
continue
i += 1
if(i == 79):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lurehsu():
shop_id = 59
name = 'lurehsu'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lurehsu.com/zh-TW/lure/productList?item1=00&item2=16&page=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 28):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("SaleID=")
page_id = page_id[:page_id.find("&Color")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 28):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NTD.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = ""
except:
i += 1
if(i == 28):
p += 1
continue
i += 1
if(i == 28):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pufii():
shop_id = 61
name = 'pufii'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pufii.com.tw/Shop/itemList.aspx?&m=6&smfp=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=P", "")
page_id = page_id.replace("&m=6", "")
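                # discounted items add a span labelled '活動價NT' (promo price); strip that label, otherwise fall back to the single 'NT' price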
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[2]" % (i,)).text
sale_price = sale_price.strip('活動價NT')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT')
except:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mouggan():
shop_id = 62
name = 'mouggan'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mouggan.com/zh-tw/category/ALL-ITEM?P=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
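        # dismiss the popup (icon-popup-close) if it shows up so it does not block element clicks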
try:
chrome.find_element_by_xpath(
"//a[@class='close p-0']/i[@class='icon-popup-close']").click()
except:
pass
i = 1
while(i < 19):
try:
title = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[1]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 19):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 19):
p += 1
continue
i += 1
if(i == 19):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mercci():
shop_id = 64
name = 'mercci'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mercci22.com/zh-tw/tag/HOTTEST?P=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
# chrome.find_element_by_xpath("//a[@class='close p-0']/i[@class='icon-popup-close']").click()
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='pdname']/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='pdname']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/a[@class='items__image js-loaded']/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='price']" % (i,)).text
sale_price = sale_price.strip('NT.')
k = sale_price.find("NT.")
sale_price = sale_price[k+3:len(sale_price)]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='price']/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/p[@class='price']/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
if(sale_price == ""):
continue
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sivir():
shop_id = 65
name = 'sivir'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sivir.com.tw/collections/new-all-%E6%89%80%E6%9C%89?page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a[@data-id]" % (i,)).get_attribute('data-id')
pic_link = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[1]/a/img" % (i,)).get_attribute('data-src')
pic_link = f"https:{pic_link}"
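                # data-src is protocol-relative (starts with //), hence the https: prefix added above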
sale_price = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.replace('NT$', '')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nana():
shop_id = 66
name = 'nana'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.2nana.tw/product.php?page=" + \
str(p) + "&cid=1#prod_list"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 75):
try:
title = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
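                # note: str.lstrip strips a *set* of characters, not a literal prefix; it works here because the remaining product id is numeric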
pic_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[1]/a/img" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[2]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 75):
p += 1
continue
i += 1
if(i == 75):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aachic():
shop_id = 70
name = 'aachic'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.aachic.com/categories/all-%E6%89%80%E6%9C%89%E5%95%86%E5%93%81?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i][@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
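                # the product image is set via the CSS background-image property, so read the computed style and unwrap url("...")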
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lovso():
shop_id = 71
name = 'lovso'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lovso.com.tw/Shop/itemList.aspx?m=8&o=0&sa=0&smfp=" + \
str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/center/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=8", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Bowwow():
shop_id = 72
name = 'bowwow'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.bowwowkorea.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=48"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Suitangtang():
shop_id = 74
name = 'suitangtang'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
i = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.suitangtang.com/Catalog/WOMAN"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
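        # scroll to the bottom of the page so that lazily loaded products (if any) are rendered before scraping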
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
while(True):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[@class='name']" % (i,)).text
k = title.find("NT$")
title = title[0:k-1]
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "/Product/")
page_id = page_id[:page_id.find("?c=")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/a/img" % (i,)).get_attribute('data-original')
except:
i += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$')
k = sale_price.find("NT$")
sale_price = sale_price[k+3:len(sale_price)]
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span/span" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
continue
i += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Chochobee():
shop_id = 78
name = 'chochobee'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.chochobee.com/catalog.php?m=40&s=0&t=0&sort=&page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=40&s=0&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a/div/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Asobi():
shop_id = 80
name = 'asobi'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.asobi.com.tw/Shop/itemList.aspx?undefined&smfp=" + \
str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 34):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&&m=1&o=5&sa=1", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]/div/span" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 34):
p += 1
continue
i += 1
if(i == 34):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Kiyumi():
shop_id = 81
name = 'kiyumi'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.kiyumishop.com/catalog.php?m=73&s=0&t=0&sort=&page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[2]" % (i,)).text
except:
flag += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=73&s=0&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a/div/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Genquo():
shop_id = 82
name = 'genquo'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.genquo.com/zh-tw/category/women?P=" + str(p)
print("處理頁面:", url)
# 如果頁面超過(找不到),直接印出completed然後break跳出迴圈
try:
chrome.get(url)
except:
break
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/a" % (i,)).text
except:
flag += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path + '?' + make_id.query
page_id = page_id.lstrip("/zh-tw/market/n/")
page_id = page_id[:page_id.find("?c=")]
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 37):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[1]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Oolala():
shop_id = 86
name = 'oolala'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.styleoolala.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=48"
print("處理頁面:", url)
# 如果頁面超過(找不到),直接印出completed然後break跳出迴圈
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
flag += 1
print(p, i)
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
if(sale_price == ""):
continue
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pattis():
shop_id = 87
name = 'pattis'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.i-pattis.com/catalog.php?m=1&s=21&t=0&sort=&page=" + \
str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section[@class='cataList']/ul/li[%i]/span[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section[@class='cataList']/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=1&s=21&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/del" % (i,)).text
ori_price = ori_price.strip('NT.$')
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Scheminggg():
shop_id = 90
name = 'scheminggg'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.scheminggg.com/productlist?page=" + str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a[1][@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("/products?saleid=")
page_id = page_id.rstrip("&colorid=")
pic_link = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a/img" % (i,)).get_attribute('src')
if (pic_link == ""):
i += 1
if(i == 37):
p += 1
continue
except:
i += 1
if(i == 37):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Laconic():
shop_id = 94
name = 'laconic'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
i = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://laconic.waca.ec/product/all"
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(True):
try:
title = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]//h4" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
find_href = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]//a/span" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.replace('url("', '')
pic_link = pic_link.replace('")', '')
sale_price = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]//li/span" % (i,)).text
sale_price = sale_price.strip('$')
ori_price = ""
except:
i += 1
if(i % 10 == 1):
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
continue
i += 1
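            # after every 10 items, scroll to the page bottom, presumably to trigger lazy loading of the next batch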
if(i % 10 == 1):
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pixelcake():
shop_id = 96
name = 'pixelcake'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pixelcake.com.tw/zh-tw/category/ALL?P=" + str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
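        # dismiss the notification / newsletter pop-ups if they appear, so they do not cover the product grid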
try:
chrome.find_element_by_xpath(
"//button[@class='aiq-2-w6Qa']").click()
chrome.find_element_by_xpath(
"//i[@class='icon-popup-close']").click()
except:
pass
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']/div[1]/div[%i]/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']/div[1]/div[%i]/div[2]//div[@class='like-counter ']" % (i,)).get_attribute('data-custommarketid')
pic_link = chrome.find_element_by_xpath(
"//div[%i]/div[1]/a/picture/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/div[5]/div[2]/div[1]/span[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Miyuki():
shop_id = 97
name = 'miyuki'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's rows, merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.miyukiselect.com/zh-tw/category/ALL-ITEMS?P=" + \
str(p)
        # if the page number is past the last page (not found), break out of the loop
try:
chrome.get(url)
except:
break
try:
chrome.find_element_by_xpath(
"//button[@class='aiq-2-w6Qa']").click()
chrome.find_element_by_xpath(
"//i[@class='icon-popup-close']").click()
except:
pass
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/a" % (i,)).text
if (title == ""):
i += 1
if(i == 17):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']/div[1]/div[%i]/div[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("c=")
pic_link = chrome.find_element_by_xpath(
"//div[%i]/div[1]/a/picture/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Percha():
shop_id = 99
name = 'percha'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
df = | pd.DataFrame() | pandas.DataFrame |
"""
Script reads the csv file describing the details of people requiring help.
"""
__author__ = "<NAME>"
__license__ = "MIT"
__version__ = "1.0.1"
# imports
import pandas as pd
import numpy as np
import os
import re
class DataReader:
def __init__(self, filename):
self.filename = filename
self.df = self._read_file()
self.df_filtered = pd.DataFrame()
def _read_file(self):
df = | pd.read_json(self.filename) | pandas.read_json |
import pandas as pd
import numpy as np
import sys
import logging
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import re
import os
import bisect
import io
import requests
from ast import literal_eval
from html.parser import HTMLParser
from typing import List, Dict
# This module provides methods that handle MTA turnstile data
def _process_raw_data(raw_data: pd.DataFrame, group_by: List[str]) -> pd.DataFrame:
logging.getLogger().info("Cleaning turnstile data")
# create datetime from DATE and TIME columns
processed = raw_data.assign(
datetime=pd.to_datetime(
raw_data['DATE'] + " " + raw_data['TIME'],
format="%m/%d/%Y %H:%M:%S"))
# remove mysterious duplicate index along STATION + UNIT
processed = processed.groupby(
group_by + ['datetime']).sum().reset_index()
processed = processed.set_index(pd.DatetimeIndex(processed.datetime))
processed.drop(columns=['datetime'], inplace=True)
# clean up whitespace in the columns
processed.rename(columns={c: c.strip()
for c in processed.columns}, inplace=True)
return processed
def _process_grouped_data(grouped: pd.DataFrame,
frequency: str) -> pd.DataFrame:
# calculate the diff and take the absolute value
entry_diffs = grouped.ENTRIES.diff()
exit_diffs = grouped.EXITS.diff()
# clean up data
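    # turnstile counters occasionally reset or run backwards, so negative diffs or implausibly large jumps (>10000) are treated as missing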
# grouped.loc[entry_diffs < 0, 'entry_diffs'] = np.nan
# grouped.loc[exit_diffs < 0, 'exit_diffs'] = np.nan
# grouped.loc[entry_diffs > 10000, 'entry_diffs'] = np.nan
# grouped.loc[exit_diffs > 10000, 'exit_diffs'] = np.nan
entry_diffs = pd.Series([np.nan if (x < 0)|(x>10000) else x for x in entry_diffs])
exit_diffs = | pd.Series([np.nan if (x < 0)|(x>10000) else x for x in exit_diffs]) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 8 11:31:39 2019
@author: <NAME>
"""
import pandas as pd
import merge
import descriptors
data_a = pd.read_csv('data/dataset-A.csv', header=0)
data_b = | pd.read_csv('data/dataset-B.csv', header=0) | pandas.read_csv |
"""Tests for `models` module."""
import pytest
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from pipelitools.models import models as m
@pytest.fixture(scope="function")
def df_binary():
X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=1)
X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=2, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.5, 0.5], random_state=2)
y_train = pd.Series(y_train)
y_test = pd.Series(y_test)
return X_train, y_train, X_test, y_test
@pytest.fixture(scope="function")
def df_multiclass():
X_train, y_train = make_classification(n_samples=100, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.2, 0.3, 0.5], random_state=1)
X_test, y_test = make_classification(n_samples=50, n_features=2, n_informative=2,
n_redundant=0, n_repeated=0, n_classes=3, n_clusters_per_class=1,
class_sep=2, flip_y=0, weights=[0.3, 0.3, 0.4], random_state=2)
y_train = pd.Series(y_train)
y_test = | pd.Series(y_test) | pandas.Series |
import numpy as np
import pandas as pd
import glob
import warnings
import scipy
from scipy.stats import kurtosis, skew
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import mutual_info_score,accuracy_score
def numeric_impute(data, num_cols, method):
num_data = data[num_cols]
if method == 'mode':
output = num_data.fillna(getattr(num_data, method)().iloc[0])
else:
output = num_data.fillna(getattr(num_data, method)())
return output
def dict_merge(*args):
imp = {}
for dictt in args:
imp.update(dictt)
return imp
def summary_stats(data, include_quantiles = False):
quantiles = np.quantile(data,[0, 0.25, 0.75, 1])
minn = quantiles[0]
maxx = quantiles[-1]
q1 = quantiles[1]
q3 = quantiles[2]
mean = np.mean(data)
std = np.std(data)
if include_quantiles:
return minn, q1, mean, std, q3, maxx
else:
return minn, mean, std, maxx
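# example (hypothetical input): summary_stats(np.array([1, 2, 3, 4])) -> (1.0, 2.5, ~1.118, 4.0)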
def pair_corr(data):
cors = abs(data.corr().values)
cors = np.triu(cors,1).flatten()
cors = cors[cors != 0]
return cors
def calc_MI(x, y, bins):
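    # build a joint 2-D histogram of x and y and compute mutual information from the resulting contingency table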
c_xy = np.histogram2d(x, y, bins)[0]
mi = mutual_info_score(None, None, contingency=c_xy)
return mi
def MI(X, y):
bins = 10
# check if X and y have the same length
n = X.shape[1]
matMI = np.zeros(n)
for ix in np.arange(n):
matMI[ix] = calc_MI(X.iloc[:,ix], y, bins)
return matMI
def preprocessing(data):
X = data.iloc[:, :-1]
# selecting the response variable
y = data.iloc[:, -1]
# one-hot encoding
X = pd.get_dummies(X)
le = LabelEncoder()
y = le.fit_transform(y)
return X, y
def meta_features(data, num_cols, categorical_cols):
metafeatures = {}
target_variable = data.iloc[:, -1]
nr_classes = target_variable.nunique()
metafeatures['nr_classes'] = nr_classes
nr_instances = data.shape[0]
metafeatures['nr_instances'] = nr_instances
log_nr_instances = np.log(nr_instances)
metafeatures['log_nr_instances'] = log_nr_instances
nr_features = data.shape[1]
metafeatures['nr_features'] = nr_features
log_nr_features = np.log(nr_features)
metafeatures['log_nr_features'] = log_nr_features
missing_val = data.isnull().sum().sum() + data.isna().sum().sum()
#metafeatures['missing_val'] = missing_val
# Ratio of Missing Values
ratio_missing_val = missing_val / data.size
# metafeatures['ratio_missing_val'] = ratio_missing_val
# Number of Numerical Features
nr_numerical_features = len(num_cols)
# metafeatures['nr_numerical_features'] = nr_numerical_features
# Number of Categorical Features
nr_categorical_features = len(categorical_cols)
#metafeatures['nr_categorical_features'] = nr_categorical_features
# print(data[num_cols].nunique() / data[num_cols].count())
# Ratio of Categorical to Numerical Features
if nr_numerical_features != 0:
ratio_num_cat = nr_categorical_features / nr_numerical_features
else:
ratio_num_cat = 'NaN'
# metafeatures['ratio_num_cat'] = ratio_num_cat
# Dataset Ratio
dataset_ratio = nr_features / nr_instances
metafeatures['dataset_ratio'] = dataset_ratio
# Categorical Features Statistics
if nr_categorical_features != 0:
labels = data[categorical_cols].nunique()
# Labels Sum
labels_sum = np.sum(labels)
# Labels Mean
labels_mean = np.mean(labels)
# Labels Std
labels_std = np.std(labels)
else:
labels_sum = 0
labels_mean = 0
labels_std = 0
# metafeatures['labels_sum'] = labels_sum
#metafeatures['labels_mean'] = labels_mean
#metafeatures['labels_std'] = labels_std
return metafeatures
def meta_features2(data, num_cols):
metafeatures = {}
nr_numerical_features = len(num_cols)
if nr_numerical_features != 0:
skewness_values = abs(data[num_cols].skew())
kurtosis_values = data[num_cols].kurtosis()
skew_min, skew_q1, \
skew_mean, skew_std, \
skew_q3, skew_max = summary_stats(skewness_values,
include_quantiles=True)
kurtosis_min, kurtosis_q1, \
kurtosis_mean, kurtosis_std, \
kurtosis_q3, kurtosis_max = summary_stats(kurtosis_values,
include_quantiles=True)
pairwise_correlations = pair_corr(data[num_cols])
try:
rho_min, rho_mean, \
rho_std, rho_max = summary_stats(pairwise_correlations)
except IndexError:
pass
var_names = ['skew_min', 'skew_std', 'skew_mean','skew_q1', 'skew_q3', 'skew_max',
'kurtosis_min', 'kurtosis_std','kurtosis_mean', 'kurtosis_q1','kurtosis_q3', 'kurtosis_max',
'rho_min', 'rho_max', 'rho_mean','rho_std']
for var in var_names:
try:
metafeatures[var] = eval(var)
except NameError:
metafeatures[var] = 0
return metafeatures
def shan_entropy(c):
c_normalized = c[np.nonzero(c)[0]]
H = -sum(c_normalized* np.log2(c_normalized))
return H
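# sanity check: for a fair coin, shan_entropy(np.array([0.5, 0.5])) returns 1.0 (one bit)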
def norm_entropy(X):
bins = 10
nr_features = X.shape[1]
n = X.shape[0]
H = np.zeros(nr_features)
for i in range(nr_features):
x = X.iloc[:,i]
cont = len(np.unique(x)) > bins
if cont:
# discretizing cont features
x_discr = np.histogram(x, bins)[0]
x_norm = x_discr / float(np.sum(x_discr))
H_x = shan_entropy(x_norm)
else:
x_norm = x.value_counts().values / n
H_x = shan_entropy(x_norm)
H[i] = H_x
H /= np.log2(n)
return H
def meta_features_info_theoretic(X, y):
metafeatures = {}
nr_instances = X.shape[0]
# Class Entropy
class_probs = np.bincount(y) / nr_instances
class_entropy = shan_entropy(class_probs)
metafeatures['class_entropy'] = class_entropy
# Class probability
metafeatures['prob_min'], \
metafeatures['prob_mean'], \
metafeatures['prob_std'], \
metafeatures['prob_max'] = summary_stats(class_probs)
# Norm. attribute entropy
H = norm_entropy(X)
metafeatures['norm_entropy_min'], \
metafeatures['norm_entropy_mean'], \
metafeatures['norm_entropy_std'], \
metafeatures['norm_entropy_max'] = summary_stats(H)
# Mutual information
mutual_information = MI(X, y)
metafeatures['mi_min'], \
metafeatures['mi_mean'], \
metafeatures['mi_std'], \
metafeatures['mi_max'] = summary_stats(mutual_information)
# Equiv. nr. of features
metafeatures['equiv_nr_feat'] = metafeatures['class_entropy'] / metafeatures['mi_mean']
# Noise-signal ratio
noise = metafeatures['norm_entropy_mean'] - metafeatures['mi_mean']
metafeatures['noise_signal_ratio'] = noise / metafeatures['mi_mean']
return metafeatures
class LandmarkerModel:
def __init__(self, model, X_train, y_train, X_test, y_test):
self.model = model
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
def accuracy(self):
self.model.fit(self.X_train, self.y_train)
predictions = self.model.predict(self.X_test)
CV_accuracy = accuracy_score(self.y_test, predictions)
return CV_accuracy
def meta_features_landmarkers(X, y):
metafeatures = {}
k = 10
kf = StratifiedKFold(n_splits=k, shuffle=True)
model_1nn = KNeighborsClassifier(n_neighbors=1)
model_dt = DecisionTreeClassifier()
model_gnb = GaussianNB()
model_lda = LinearDiscriminantAnalysis()
CV_accuracy_1nn = 0
CV_accuracy_dt = 0
CV_accuracy_gnb = 0
CV_accuracy_lda = 0
for train_index, test_index in kf.split(X, y):
X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
y_train, y_test = y[train_index], y[test_index]
CV_accuracy_1nn += LandmarkerModel(model_1nn, X_train, y_train, X_test, y_test).accuracy()
CV_accuracy_dt += LandmarkerModel(model_dt, X_train, y_train, X_test, y_test).accuracy()
CV_accuracy_gnb += LandmarkerModel(model_gnb, X_train, y_train, X_test, y_test).accuracy()
try:
CV_accuracy_lda += LandmarkerModel(model_lda, X_train, y_train, X_test, y_test).accuracy()
except scipy.linalg.LinAlgError:
pass
CV_accuracy_1nn /= k
CV_accuracy_dt /= k
CV_accuracy_gnb /= k
CV_accuracy_lda /= k
metafeatures['Landmarker_1NN'] = CV_accuracy_1nn
metafeatures['Landmarker_dt'] = CV_accuracy_dt
metafeatures['Landmarker_gnb'] = CV_accuracy_gnb
metafeatures['Landmarker_lda'] = CV_accuracy_lda
return metafeatures
def all_metafeatures(data, num_cols, metafeatures1):
metafeatures2 = meta_features2(data, num_cols)
X, y = preprocessing(data)
metafeatures3 = meta_features_info_theoretic(X, y)
metafeatures4 = meta_features_landmarkers(X, y)
metafeatures = dict_merge(metafeatures1, metafeatures2,
metafeatures3, metafeatures4)
return metafeatures
def extract_metafeatures(file):
warnings.filterwarnings("ignore")
data = pd.read_csv(file,
index_col=None,
header=0,
sep = '[;,]',
na_values='?')
data.columns = map(str.lower, data.columns)
# removing an id column if exists
if 'id' in data.columns:
        data = data.drop('id', axis=1)
# remove constant columns
data = data.loc[:, (data != data.iloc[0]).any()]
const_col = data.std().index[data.std() == 0]
data = data.drop(const_col,axis=1)
# remove columns with only NaN values
empty_cols = ~data.isna().all()
data = data.loc[:, empty_cols]
cols = set(data.columns)
num_cols = set(data._get_numeric_data().columns)
categorical_cols = list(cols.difference(num_cols))
# data imputation for categorical features
categ_data = data[categorical_cols]
data[categorical_cols] = categ_data.fillna(categ_data.mode().iloc[0])
metafeatures1 = meta_features(data, num_cols, categorical_cols)
### Numerical Features Statistics
#missing_val = metafeatures1['missing_val']
missing_val=0
if missing_val != 0:
imputation_types = ['mean', 'median', 'mode']
imputed_data = data.copy()
results = pd.DataFrame()
for index, num_imput_type in enumerate(imputation_types):
num_cols = list(num_cols)
imputed_data[num_cols] = numeric_impute(data, num_cols, num_imput_type)
#metafeatures1['num_imput_type'] = num_imput_type
metafeatures = all_metafeatures(imputed_data, num_cols, metafeatures1)
df = | pd.DataFrame([metafeatures]) | pandas.DataFrame |
"""Create a synthetic population that is representative of Germany."""
from pathlib import Path
import numpy as np
import pandas as pd
import pytask
import sid
from sid.shared import factorize_assortative_variables
from src.config import BLD
from src.config import N_HOUSEHOLDS
from src.config import SRC
from src.create_initial_states.create_contact_model_group_ids import (
add_contact_model_group_ids,
)
from src.create_initial_states.create_vaccination_priority import (
create_vaccination_group,
)
from src.create_initial_states.create_vaccination_priority import (
create_vaccination_rank,
)
from src.prepare_data.task_prepare_rki_data import TRANSLATE_STATES
from src.shared import create_age_groups
from src.shared import create_age_groups_rki
_DEPENDENCIES = {
# py files
"sid_shared.py": Path(sid.__file__).parent.resolve() / "shared.py",
"shared.py": SRC / "shared.py",
"create_contact_model_group_ids": SRC
/ "create_initial_states"
/ "create_contact_model_group_ids.py",
"add_weekly_ids": SRC / "create_initial_states" / "add_weekly_ids.py",
"make_educ_group_columns": SRC
/ "create_initial_states"
/ "make_educ_group_columns.py",
"create_vaccination_priority": SRC
/ "create_initial_states"
/ "create_vaccination_priority.py",
"translations": SRC / "prepare_data" / "task_prepare_rki_data.py",
#
# data
"hh_data": SRC
/ "original_data"
/ "population_structure"
/ "microcensus2010_cf.dta",
"county_probabilities": BLD / "data" / "population_structure" / "counties.parquet",
"work_daily_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "work_recurrent_daily.pkl",
"work_weekly_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "work_recurrent_weekly.pkl",
"other_daily_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "other_recurrent_daily.pkl",
"other_weekly_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "other_recurrent_weekly.pkl",
"params": BLD / "params.pkl",
}
@pytask.mark.depends_on(_DEPENDENCIES)
@pytask.mark.parametrize(
"n_hhs, produces",
[
(N_HOUSEHOLDS, BLD / "data" / "initial_states.parquet"),
(100_000, BLD / "data" / "debug_initial_states.parquet"),
],
)
def task_create_initial_states_microcensus(depends_on, n_hhs, produces):
mc = pd.read_stata(depends_on["hh_data"])
county_probabilities = pd.read_parquet(depends_on["county_probabilities"])
work_daily_dist = pd.read_pickle(depends_on["work_daily_dist"])
work_weekly_dist = pd.read_pickle(depends_on["work_weekly_dist"])
other_daily_dist = pd.read_pickle(depends_on["other_daily_dist"])
other_weekly_dist = pd.read_pickle(depends_on["other_weekly_dist"])
params = pd.read_pickle(depends_on["params"])
no_vaccination_share = params.loc[
("vaccinations", "share_refuser", "share_refuser"), "value"
]
df = _build_initial_states(
mc=mc,
county_probabilities=county_probabilities,
work_daily_dist=work_daily_dist,
work_weekly_dist=work_weekly_dist,
other_daily_dist=other_daily_dist,
other_weekly_dist=other_weekly_dist,
n_households=n_hhs,
seed=3933,
no_vaccination_share=no_vaccination_share,
)
df.to_parquet(produces)
def _build_initial_states(
mc,
county_probabilities,
work_daily_dist,
work_weekly_dist,
other_daily_dist,
other_weekly_dist,
n_households,
seed,
no_vaccination_share,
):
mc = _prepare_microcensus(mc)
equal_probs = pd.DataFrame()
equal_probs["hh_id"] = mc["hh_id"].unique()
equal_probs["probability"] = 1 / len(equal_probs)
df = _sample_mc_hhs(mc, equal_probs, n_households=n_households, seed=seed)
county_and_state = _draw_counties(
hh_ids=df["hh_id"].unique(),
county_probabilities=county_probabilities,
seed=2282,
)
df = df.merge(county_and_state, on="hh_id", validate="m:1")
df = df.astype({"age": np.uint8, "hh_id": "category"})
df = df.sort_values("hh_id").reset_index()
df.index.name = "temp_index"
assert not df.index.duplicated().any()
df["occupation"] = _create_occupation(df)
df = add_contact_model_group_ids(
df,
work_daily_dist=work_daily_dist,
work_weekly_dist=work_weekly_dist,
other_daily_dist=other_daily_dist,
other_weekly_dist=other_weekly_dist,
seed=555,
)
adult_at_home = (df["occupation"].isin(["stays home", "retired"])) & (
df["age"] >= 18
)
df["adult_in_hh_at_home"] = adult_at_home.groupby(df["hh_id"]).transform(np.any)
df["educ_contact_priority"] = _create_educ_contact_priority(df)
df["vaccination_group"] = create_vaccination_group(states=df, seed=484)
df["vaccination_rank"] = create_vaccination_rank(
df["vaccination_group"], share_refuser=no_vaccination_share, seed=909
)
# This is uncorrelated with the work contact priority.
# This allows us to easily match the empirical compliance rate.
df["rapid_test_compliance"] = np.random.uniform(low=0, high=1, size=len(df))
df["quarantine_compliance"] = np.random.uniform(low=0, high=1, size=len(df))
# factorize group id columns
to_factorize = [col for col in df if "_group_id" in col]
for col in to_factorize:
df[col], _ = factorize_assortative_variables(df, [col])
df.index.name = "index"
df = _only_keep_relevant_columns(df)
np.random.seed(1337)
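    # shuffle individuals so that members of the same household are not stored in consecutive rows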
df = df.sample(frac=1).reset_index(drop=True)
return df
def _prepare_microcensus(mc):
rename_dict = {
"ef1": "east_west",
"ef3s": "district_id",
"ef4s": "hh_nr_in_district",
"ef20": "hh_size",
"ef29": "work_type",
"ef31": "hh_form",
"ef44": "age",
"ef46": "gender",
"ef149": "frequency_work_saturday",
"ef150": "frequency_work_sunday",
}
mc = mc.rename(columns=rename_dict)
mc = mc[rename_dict.values()]
mc["private_hh"] = mc["hh_form"] == "bevölkerung in privathaushalten"
# restrict to private households for the moment
mc = mc[mc["private_hh"]]
mc["gender"] = (
mc["gender"]
.replace({"männlich": "male", "weiblich": "female"})
.astype("category")
)
mc["age"] = mc["age"].replace({"95 jahre und älter": 96})
mc["age_group"] = create_age_groups(mc["age"])
mc["age_group_rki"] = create_age_groups_rki(mc)
# 53% no, 21% every now and then, 17% regularly, 9% all the time
work_answers = ["ja, ständig", "ja, regelmäßig"]
mc["work_saturday"] = mc["frequency_work_saturday"].isin(work_answers)
# 72% no, 14% every now and then, 10% regularly, 3% all the time
mc["work_sunday"] = mc["frequency_work_sunday"].isin(work_answers)
mc["hh_id"] = mc.apply(_create_mc_hh_id, axis=1)
mc["hh_id"] = | pd.factorize(mc["hh_id"]) | pandas.factorize |
import sys
import pandas as pd
sys.path.insert(0, '..')
from emspy.query import InsertQuery
from mock_connection import MockConnection
from mock_ems import MockEMS
if sys.version_info[0] == 2:
from mock import patch
else:
from unittest.mock import patch
@patch('emspy.query.query.EMS', MockEMS)
@patch('emspy.Connection', MockConnection)
def test_all_df_rows_exist_in_create_columns_without_schema_map():
ems_system = 'ems24-app'
connection = MockConnection(user='', pwd='')
entity_id = 'foo'
i_query = InsertQuery(connection, ems_system, entity_id)
dummy_df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'C': [4, 5, 6]})
i_query.insert_df(dummy_df)
create_columns = i_query.get_create_columns()
create_columns = create_columns['createColumns']
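    # create_columns is a list of rows; each row is a list of {'fieldId': ..., 'value': ...} dicts in insertion order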
# Rows should have been added in order.
i = 0
for idx, row in dummy_df.iterrows():
row_dict = row.to_dict()
j = 0
for item in row_dict.items():
col = item[0]
val = item[1]
create_columns_entry = create_columns[i][j]
assert (create_columns_entry['fieldId'] == col)
assert (create_columns_entry['value'] == val)
j = j + 1
i = i + 1
@patch('emspy.query.query.EMS', MockEMS)
@patch('emspy.Connection', MockConnection)
def test_all_create_columns_exist_in_df_without_schema_map():
ems_system = 'ems24-app'
connection = MockConnection(user='', pwd='')
entity_id = 'foo'
i_query = InsertQuery(connection, ems_system, entity_id)
dummy_df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'C': [4, 5, 6]})
i_query.insert_df(dummy_df)
create_columns = i_query.get_create_columns()
create_columns = create_columns['createColumns']
# Rows should have been added in order.
i = 0
for row in create_columns:
row_df = dummy_df.iloc[i, :] # create_column row # should correspond to dataframe row #
j = 0
for item in row:
fieldId = item['fieldId']
value = item['value']
assert (value == row_df[fieldId])
j = j + 1
i = i + 1
@patch('emspy.query.query.EMS', MockEMS)
@patch('emspy.Connection', MockConnection)
def test_all_df_rows_exist_in_create_columns_with_schema_map():
ems_system = 'ems24-app'
connection = MockConnection(user='', pwd='')
entity_id = 'foo'
i_query = InsertQuery(connection, ems_system, entity_id)
dummy_df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'C': [4, 5, 6]})
schema_map = {'A': '[-hub][A]', 'B': '[-hub][B]', 'C': '[-hub][C]'}
i_query.insert_df(dummy_df, schema_map=schema_map)
dummy_df = dummy_df.rename(columns=schema_map)
create_columns = i_query.get_create_columns()
create_columns = create_columns['createColumns']
# Rows should have been added in order.
i = 0
for idx, row in dummy_df.iterrows():
row_dict = row.to_dict()
j = 0
for item in row_dict.items():
col = item[0]
val = item[1]
create_columns_entry = create_columns[i][j]
assert (create_columns_entry['fieldId'] == col)
assert (create_columns_entry['value'] == val)
j = j + 1
i = i + 1
@patch('emspy.query.query.EMS', MockEMS)
@patch('emspy.Connection', MockConnection)
def test_all_create_columns_exist_in_df_with_schema_map():
ems_system = 'ems24-app'
connection = MockConnection(user='', pwd='')
entity_id = 'foo'
i_query = InsertQuery(connection, ems_system, entity_id)
dummy_df = | pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'c'], 'C': [4, 5, 6]}) | pandas.DataFrame |
import matplotlib.pyplot as plt
from pathlib import Path
import pandas as pd
import os
import numpy as np
def get_file_paths(file_directory):
file_paths = os.listdir(file_directory)
file_paths = list(filter(lambda f_path: os.path.isdir(file_directory / f_path), file_paths))
return file_paths
def plot_day(plot_directory, df_phases_day, sdp_name, start_time, df_comparison_values, plot_method, comparison_label):
sdp_directory = plot_directory / sdp_name
if not os.path.exists(sdp_directory):
os.makedirs(sdp_directory)
plt.figure(1)
plt.ylabel('Phases')
p_counter = 1
relevant_plot = False
transgressions_sum = 0
for df_p_day in df_phases_day:
if not df_p_day.empty:
transgressions = plot_method(df_p_day, p_counter)
transgressions_sum += transgressions
relevant_plot = relevant_plot or transgressions > 0
p_counter = p_counter + 1
if relevant_plot and not df_comparison_values.empty:
df_comparison_values.plot(figsize=(24, 6), linewidth=0.5, color='grey', label=comparison_label)
if relevant_plot:
legend = plt.legend(fontsize='x-large', loc='lower left')
for line in legend.get_lines():
line.set_linewidth(4.0)
plot_path = plot_directory / sdp_name / start_time
if relevant_plot:
plt.savefig(plot_path)
plt.close(1)
if transgressions_sum > 0:
print(start_time)
print(transgressions_sum)
return transgressions_sum
def plot_pickle_daywise(pickle_directory, plot_directory, plot_method, comparison_series_func):
transgression_sum = 0
nmbr_elements_sum = 0
file_paths = get_file_paths(pickle_directory)
print(file_paths)
for path in file_paths:
print(path)
comparison_label, df_comparison_values = comparison_series_func(path)
# df_mean_values = pd.read_pickle(pickle_directory/(path+'season_aggregation')).sort_index()
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
nmbr_elements_sum += sum(map(lambda df: df.shape[0], df_phases))
day = pd.Timedelta('1d')
min_date = min(list(map(lambda df: df.index.min(), df_phases))).date()
max_date = max(list(map(lambda df: df.index.max(), df_phases))).date()
print(min_date)
print(max_date)
for start_time in pd.date_range(min_date, max_date, freq='d'):
end_time = start_time + day
# df_day = df.loc[df.index>start_time and df.index<end_time, :]
df_phases_day = list(map(lambda df: df.loc[start_time:end_time], df_phases))
df_comparison_values_day = df_comparison_values.loc[start_time:end_time]
# print(start_time.date())
transgression_sum += plot_day(plot_directory, df_phases_day, path.name, str(start_time.date()),
df_comparison_values_day, plot_method, comparison_label)
return transgression_sum, nmbr_elements_sum
def plot_station_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("StationDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_station_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.StationDif) > anomaly_threshold)[0])
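        # markevery highlights the anomalous samples as black dots on the phase curve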
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "meanStationAverage", pd.read_pickle(pickle_directory / 'meanStationValues')
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_station_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_phase_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("PhaseDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_station_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.phase_dif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_station_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_season_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
# anomaly_threshold = 3.2270145810536146
plot_directory = base_plot_directory / ("SeasDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_season_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.SeasDif) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "meanSeasonalAverage", pd.read_pickle(
pickle_directory / (station_name + 'season_aggregation')).sort_index()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_season_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_trafo_dif_anomalies(pickle_directory, base_plot_directory):
anomaly_threshold = 1.5
plot_directory = base_plot_directory / ("TrafoDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_trafo_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.Value.diff()) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_trafo_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_trafo_dif_anomalies_v2(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("TrafoDif_v2_" + str(anomaly_threshold).replace(".", "_"))
def plot_trafo_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.trafo) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_trafo_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def plot_time_dif_anomalies(pickle_directory, base_plot_directory, anomaly_threshold):
plot_directory = base_plot_directory / ("TimeDif_" + str(anomaly_threshold).replace(".", "_"))
def plot_time_dif_v2(df_p_day, p_counter):
transgressions = list(np.where(abs(df_p_day.time_passed) > anomaly_threshold)[0])
df_p_day.Value.plot(figsize=(24, 6), linewidth=0.9, markevery=transgressions, marker='o',
markerfacecolor='black', label="phase" + str(p_counter))
return len(transgressions)
def comparison_series_func(station_name):
return "", pd.DataFrame()
transgression_sum, nmbr_elements_sum = plot_pickle_daywise(pickle_directory, plot_directory, plot_time_dif_v2,
comparison_series_func)
print(transgression_sum)
print(nmbr_elements_sum)
ratio = transgression_sum / nmbr_elements_sum
print(ratio)
f = open(plot_directory / str(ratio), "w+")
f.close()
def get_quintiles(pickle_directory, quantile):
file_paths = get_file_paths(pickle_directory)
print(file_paths)
    aggregated_series = pd.Series(dtype=float)
for path in file_paths:
print(path)
path = pickle_directory / Path(path)
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
for df_p in df_phases:
ser = df_p.time_passed.reset_index(drop=True).abs()
            aggregated_series = pd.concat([aggregated_series, ser], ignore_index=True)
threshold = aggregated_series.quantile(q=quantile)
print(threshold)
return threshold
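# Hypothetical driver (not part of the original module): a minimal sketch showing how the
# quantile-based threshold from get_quintiles can feed one of the anomaly plotters above.
# The default directory names and the 0.99 quantile are assumptions, not taken from the source.
def plot_time_dif_anomalies_auto(pickle_directory=Path('pickles'), plot_directory=Path('plots')):
    threshold = get_quintiles(pickle_directory, quantile=0.99)
    plot_time_dif_anomalies(pickle_directory, plot_directory, threshold)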
def show_df2(pickle_name, pickle_dir=Path('pickles')):
path = pickle_dir / pickle_name
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
# df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
    df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[
        pd.Timestamp(2017, 4, 16):pd.Timestamp(2017, 4, 17)]
    df_p_h['p2'] = df_phases_h[1][['Value']].loc[pd.Timestamp(2017, 4, 16):pd.Timestamp(2017, 4, 17)]
    df_p_h['p3'] = df_phases_h[2][['Value']].loc[pd.Timestamp(2017, 4, 16):pd.Timestamp(2017, 4, 17)]
    df_p_h['t1'] = df_phases_h[0][['trafo']].loc[pd.Timestamp(2017, 4, 16):pd.Timestamp(2017, 4, 17)]
    df_p_h['t2'] = df_phases_h[1][['trafo']].loc[pd.Timestamp(2017, 4, 16):pd.Timestamp(2017, 4, 17)]
    df_p_h['t3'] = df_phases_h[2][['trafo']].loc[pd.Timestamp(2017, 4, 16):pd.Timestamp(2017, 4, 17)]
df_p_dif = pd.DataFrame()
df_p_dif['p1'] = df_p_h['p1'].diff() / df_p_h['p1'].index.to_series().diff().dt.total_seconds()
df_p_dif['p2'] = df_p_h['p2'].diff() / df_p_h['p2'].index.to_series().diff().dt.total_seconds()
df_p_dif['p3'] = df_p_h['p3'].diff() / df_p_h['p3'].index.to_series().diff().dt.total_seconds()
df_p_dif_a = df_p_dif.loc[abs(df_p_dif['p1']) >= 0.15].loc[abs(df_p_dif['p2']) >= 0.15].loc[
abs(df_p_dif['p3']) >= 0.15]
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_p_dif_a)
print(df_p_h)
def show_df(pickle_name, pickle_dir=Path('pickles')):
path = pickle_dir / pickle_name
df_phases_h = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
# df_phases = list(map(lambda p: pd.read_pickle(path / ("phase" + p)), ['1', '2', '3']))
    df_p_h = df_phases_h[0][['Value']].rename(columns={'Value': 'p1'}).loc[
        pd.Timestamp(2017, 8, 7):pd.Timestamp(2017, 8, 8)]
    df_p_h['p2'] = df_phases_h[1][['Value']].loc[pd.Timestamp(2017, 8, 7):pd.Timestamp(2017, 8, 8)]
    df_p_h['p3'] = df_phases_h[2][['Value']].loc[pd.Timestamp(2017, 8, 7):pd.Timestamp(2017, 8, 8)]
    df_p_h['t1'] = df_phases_h[0][['trafo']].loc[pd.Timestamp(2017, 8, 7):pd.Timestamp(2017, 8, 8)]
    df_p_h['t2'] = df_phases_h[1][['trafo']].loc[pd.Timestamp(2017, 8, 7):pd.Timestamp(2017, 8, 8)]
    df_p_h['t3'] = df_phases_h[2][['trafo']].loc[pd.Timestamp(2017, 8, 7):pd.Timestamp(2017, 8, 8)]
df_p_dif = pd.DataFrame()
df_p_dif['p1'] = df_p_h['p1'].diff() / df_p_h['p1'].index.to_series().diff().dt.total_seconds()
df_p_dif['p2'] = df_p_h['p2'].diff() / df_p_h['p2'].index.to_series().diff().dt.total_seconds()
df_p_dif['p3'] = df_p_h['p3'].diff() / df_p_h['p3'].index.to_series().diff().dt.total_seconds()
df_p_dif_a = df_p_dif.loc[abs(df_p_dif['p1']) >= 0.15].loc[abs(df_p_dif['p2']) >= 0.15].loc[
abs(df_p_dif['p3']) >= 0.15]
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print(df_p_dif_a)
print(df_p_h[['p1', 'p2', 'p3']])
def construct_overview2():
file_paths = os.listdir("./../pickles")
df_ps = []
for fp in file_paths:
path = Path("./../pickles") / fp
df_phases = list(map(lambda p: pd.read_pickle(path / ("h_phase" + p)), ['1', '2', '3']))
df_ps.append(df_phases)
df_table = pd.DataFrame(columns=["Messungszeitraum [d]", "MA Median [s]", "MA Mean [s]", "Max U [V]",
"Min U [V]", "Average U [V]"])
for df_phases in df_ps:
time_dif = | pd.Series() | pandas.Series |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
import gc
import os
import matplotlib.pyplot as plt
from collections import Counter
from functools import reduce
from sklearn.metrics import confusion_matrix
import itertools
from lightgbm import LGBMClassifier
import time
# import seaborn as sns
import eli5
from eli5.sklearn import PermutationImportance
# https://stackoverflow.com/questions/45230448/how-to-get-reproducible-result-when-running-keras-with-tensorflow-backend
def reset_state():
np.random.seed(42)
import random
random.seed(3)
os.environ["PYTHONHASHSEED"] = "0"
reset_state()
gc.enable()
training_set_metadata = pd.read_csv("../Data/training_set_metadata.csv")
columns_14 = [
"class_6",
"class_15",
"class_16",
"class_42",
"class_52",
"class_53",
"class_62",
"class_64",
"class_65",
"class_67",
"class_88",
"class_90",
"class_92",
"class_95",
]
columns_15 = [
"class_6",
"class_15",
"class_16",
"class_42",
"class_52",
"class_53",
"class_62",
"class_64",
"class_65",
"class_67",
"class_88",
"class_90",
"class_92",
"class_95",
"class_99",
]
training_set_selected = pd.read_csv("../Features_PT/augmented_all_features_v6.csv")
print("training set loaded")
print("total number of train objects:", len(training_set_selected))
training_set_car = pd.read_csv("../Features_New_Aug_Calc/augmented_car_features_v2.csv")
print("training_set_car loaded")
# assert len(training_set_car) == len(training_set_selected)
training_set_fits = pd.read_csv(
"../Features_New_Aug_Calc/augmented_fits_features_v2.csv"
)
print("training_set_fits loaded")
# assert len(training_set_fits) == len(training_set_selected)
training_set_cesium = pd.read_csv(
"../Features_New_Aug_Calc/augmented_cesium_features_v2.csv"
)
print("augmented_cesium_features_v2 loaded")
# assert len(training_set_cesium) == len(training_set_selected)
training_set_tanya_mag = pd.read_csv(
"../Features_New_Aug_Calc/augmented2_det_mag_features.csv"
)
# assert len(training_set_tanya_mag) == len(training_set_selected)
print("training_set_tanya_mag loaded")
training_set_my_6 = pd.read_csv(
"../Features_New_Aug_Calc/augmented_my6_features_v2.csv"
)
# assert len(training_set_my_6) == len(training_set_selected)
print("training set my6 loaded")
training_set_gauss = pd.read_csv(
"../Features_New_Aug_Calc/augmented_gauss_features_v2.csv"
)
# assert len(training_set_gauss) == len(training_set_selected)
print("training set gauss loaded")
training_set_supernova1 = pd.read_csv(
"../Features_New_Aug_Calc/augmented_supernova1_features_v2.csv"
)
# assert len(training_set_supernova1) == len(training_set_selected)
print("training set supernova1 loaded")
training_set_from_fits = pd.read_csv(
"../Features_New_Aug_Calc/aug_from_fit_features_v2.csv"
)
# assert len(training_set_from_fits) == len(training_set_selected)
print("training_set_from_fits loaded")
training_set_colors = pd.read_csv(
"../Features_colors/augmented_color_features_v2_clipped.csv"
)
# assert len(training_set_colors) == len(training_set_selected)
print("augmented_color_features_v2_clipped loaded")
training_set_periods = pd.read_csv("../Features_New_Aug_Calc/augemented_periods_v2.csv")
# assert len(training_set_periods) == len(training_set_selected)
print("augemented_periods_v2 loaded")
training_set_double_peak = pd.read_csv("../Features_PT/aug_exp_ratio_doublepeak.csv")
assert len(training_set_double_peak) == len(training_set_selected)
print("training_set_double_peak loaded")
for col in list(training_set_double_peak.columns):
if col == "object_id":
continue
training_set_double_peak.rename(columns={col: col + "_dp"}, inplace=True)
training_set_metadata = pd.read_csv("../Features_2/aug_meta.csv")
training_set_metadata["object_id"] = training_set_metadata["augmentation_id"]
training_set_metadata["aug_fold"] = training_set_metadata["augmentation_id"] % 100
assert len(training_set_metadata) == len(training_set_selected)
print("training set metadata loaded")
# best features for non-ddf
used_columns = [
"object_id",
"target",
"det_mjd_diff",
"hostgal_photoz",
"det_magn_min",
"det_magn_mean",
"cvec1",
"fm_0",
"flux_err_min",
"exp_ratio_fitting_loss",
"det_magn_std",
"gauss_err_5",
"flux_by_flux_ratio_sq_skew",
"gauss_s_2",
"cvec2",
"cvec5",
"tau_rise",
"fm_1",
"cvec3",
"fm_5",
"tau_fall",
"gauss_s_3",
"__median_absolute_deviation___5_",
"__median_absolute_deviation___2_",
"CAR_tau",
"fm_2",
"gauss_err_1",
"det_magn_mean_2",
"det_magn_mean_4",
"det_magn_mean_3",
"cvec0",
"detected_mean",
"gauss_err_2",
"my_6_certain_width",
"__skew___1_",
"__skew___2_",
"flux_diff2",
"gauss_s_4",
"gauss_s_5",
"my_6_est_width",
"__stetson_k___3_",
"cvec4",
"fm_4",
"gauss_s_1",
"__percent_close_to_median___2_",
"supernova1_s2_3",
"__qso_log_chi2_qsonu___0_",
"fm_3",
"det_magn_min_1",
"det_magn_min_0",
"det_magn_min_2",
"det_magn_min_4",
"det_magn_min_5",
"det_magn_min_3",
"__freq_varrat___1_",
"gauss_s_0",
"gauss_err_3",
"__freq_varrat___4_",
"flux_skew",
"gauss_err_4",
"magn_fit_4",
"magn_fit_2",
"magn_fit_1",
"magn_fm_5",
"magn_fm_4",
"magn_fm_1",
"gauss_err_0",
"time_score",
"det_magn_mean_5",
"g_r",
"g_i",
"r_z",
"r_i",
"g_z",
"u_r",
"i_z",
"fg_r",
"fg_i",
"fr_z",
"fr_i",
"fg_z",
"fu_r",
"fi_z",
]
used_columns.sort()
basic_list = list(training_set_double_peak.columns)
for col in basic_list:
if not col in used_columns:
print("Skipped:", col)
print("used_columns", used_columns)
print("training_set_selected", list(training_set_selected.columns))
full_train = training_set_selected
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_tanya_mag, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_my_6, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_gauss, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_supernova1, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_from_fits, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_colors, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
full_train = pd.merge(
left=full_train, right=training_set_periods, on="object_id", how="inner"
)
assert "target" in set(full_train.columns)
# full_train = pd.merge(left=full_train, right=training_set_metadata, on='object_id', how='inner')
full_train = pd.merge(
left=full_train, right=training_set_double_peak, on="object_id", how="inner"
)
assert len(full_train) == len(training_set_selected)
filter = (training_set_metadata["aug_fold"] == 0) | (training_set_metadata["ddf"] == 0)
non_dff_objects = training_set_metadata.loc[filter, "object_id"].values
print(len(non_dff_objects))
print("before", len(full_train))
full_train = full_train.loc[
full_train["object_id"].isin(non_dff_objects), :
].reset_index(drop=True)
# print(full_train)
print("after clean", len(full_train))
split_df = training_set_selected.copy()
split_df = split_df.loc[split_df["object_id"].isin(non_dff_objects), :]
split_df = split_df.reset_index(drop=True)
# print(split_df)
all_posible_columns = list(full_train.columns)
print("all_posible_columns len:", len(all_posible_columns))
print("used_columns len:", len(used_columns))
for column in used_columns:
if column not in all_posible_columns:
print("Achtung!!!", column)
full_train = full_train[used_columns]
if "target" in full_train:
y = full_train["target"]
del full_train["target"]
classes = sorted(y.unique())
# Taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with <NAME>'s post https://www.kaggle.com/kyleboone
class_weight = {c: 1 for c in classes}
for c in [64, 15]:
class_weight[c] = 2
print("Unique classes : ", classes)
full_train_columns = used_columns.copy()
full_train_columns.remove("object_id")
full_train_columns.remove("target")
print(full_train_columns)
print(len(full_train_columns))
# train_mean = full_train[full_train_columns].mean(axis=0)
# seems to be better
train_mean = 0
full_train[full_train_columns] = full_train[full_train_columns].fillna(train_mean)
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
def augmented_split(train, cv_folds, seed=1111):
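    # Folds are built on the real (pre-augmentation) object id (augmentation_id // 100), so all
    # augmented copies of one object end up on the same side of every train/validation split.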
np.random.seed(seed)
aug_id_y = train[["object_id", "target"]].copy()
aug_id_y["real_object_id"] = aug_id_y["object_id"] // 100
obj_id_y = aug_id_y[["real_object_id", "target"]].drop_duplicates()
old_gen = cv_folds.split(obj_id_y["real_object_id"], obj_id_y["target"])
for i in range(cv_folds.n_splits):
train_idx, validation_idx = next(old_gen)
train_idx = np.random.permutation(
aug_id_y[
aug_id_y["real_object_id"].isin(
obj_id_y.iloc[train_idx]["real_object_id"]
)
].index.values
)
validation_idx = np.random.permutation(
aug_id_y[
aug_id_y["real_object_id"].isin(
obj_id_y.iloc[validation_idx]["real_object_id"]
)
].index.values
)
yield train_idx, validation_idx
# check ddf
print("check ddf")
for fold_, (trn_, val_) in enumerate(augmented_split(split_df, folds)):
valid_objects = full_train.loc[val_, "object_id"].values
print(
len(
training_set_metadata[
(training_set_metadata["object_id"].isin(valid_objects))
& (training_set_metadata["ddf"] == 1)
]
)
)
class_weights_array = np.array(
[1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
)
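# class_weights_array has one entry per class in sorted order (6, 15, 16, ..., 95) and mirrors
# class_weight above: weight 2.0 at the positions of classes 15 and 64, weight 1.0 elsewhere.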
def normalize_weigths(weights):
total_weight = 0.0
for ind in range(0, len(weights)):
total_weight += weights[ind]
return weights / total_weight
class_weights_array = normalize_weigths(class_weights_array) * len(class_weights_array)
print(class_weights_array)
n_channels = 6
n_classes = 14
def multi_weighted_logloss_chai(y_true, y_preds, classes, class_weights):
"""
refactor from
@author olivier https://www.kaggle.com/ogrellier
multi logloss for PLAsTiCC challenge
"""
y_p = y_preds.reshape(y_true.shape[0], len(classes), order="F")
    # Transform y_true into dummies
y_ohe = | pd.get_dummies(y_true) | pandas.get_dummies |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import pandas as pd
import warnings
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
def create_std_type(net, data, name, element="line", overwrite=True, check_required=True):
"""
Creates type data in the type database. The parameters that are used for
the loadflow have to be at least contained in data. These parameters are:
- c_nf_per_km, r_ohm_per_km, x_ohm_per_km and max_i_ka (for lines)
- sn_mva, vn_hv_kv, vn_lv_kv, vk_percent, vkr_percent, pfe_kw, i0_percent, shift_degree* (for transformers)
- sn_hv_mva, sn_mv_mva, sn_lv_mva, vn_hv_kv, vn_mv_kv, vn_lv_kv, vk_hv_percent, vk_mv_percent, vk_lv_percent, vkr_hv_percent, vkr_mv_percent, vkr_lv_percent, pfe_kw, i0_percent, shift_mv_degree*, shift_lv_degree* (for 3-winding-transformers)
additional parameters can be added and later loaded into pandapower with the function
"parameter_from_std_type".
\* only considered in loadflow if calculate_voltage_angles = True
The standard type is saved into the pandapower library of the given network by default.
INPUT:
**net** - The pandapower network
**data** - dictionary of standard type parameters
**name** - name of the standard type as string
**element** - "line", "trafo" or "trafo3w"
EXAMPLE:
>>> line_data = {"c_nf_per_km": 0, "r_ohm_per_km": 0.642, "x_ohm_per_km": 0.083, "max_i_ka": 0.142, "type": "cs", "q_mm2": 50, "alpha": 4.03e-3}
        >>> pandapower.create_std_type(net, line_data, "NAYY 4x50 SE", element='line')
"""
if type(data) != dict:
raise UserWarning("type data has to be given as a dictionary of parameters")
if check_required:
if element == "line":
required = ["c_nf_per_km", "r_ohm_per_km", "x_ohm_per_km", "max_i_ka"]
elif element == "trafo":
required = ["sn_mva", "vn_hv_kv", "vn_lv_kv", "vk_percent", "vkr_percent",
"pfe_kw", "i0_percent", "shift_degree"]
elif element == "trafo3w":
required = ["sn_hv_mva", "sn_mv_mva", "sn_lv_mva", "vn_hv_kv", "vn_mv_kv", "vn_lv_kv",
"vk_hv_percent", "vk_mv_percent", "vk_lv_percent", "vkr_hv_percent",
"vkr_mv_percent", "vkr_lv_percent", "pfe_kw", "i0_percent", "shift_mv_degree",
"shift_lv_degree"]
else:
raise ValueError("Unkown element type %s" % element)
for par in required:
if par not in data:
raise UserWarning("%s is required as %s type parameter" % (par, element))
library = net.std_types[element]
if overwrite or not (name in library):
library.update({name: data})
def create_std_types(net, data, element="line", overwrite=True, check_required=True):
"""
Creates multiple standard types in the type database.
INPUT:
**net** - The pandapower network
**data** - dictionary of standard type parameter sets
**element** - "line", "trafo" or "trafo3w"
EXAMPLE:
>>> linetypes = {"typ1": {"r_ohm_per_km": 0.01, "x_ohm_per_km": 0.02, "c_nf_per_km": 10, "max_i_ka": 0.4, "type": "cs"},
>>> "typ2": {"r_ohm_per_km": 0.015, "x_ohm_per_km": 0.01, "c_nf_per_km": 30, "max_i_ka": 0.3, "type": "cs"}}
>>> pp.create_std_types(net, data=linetypes, element="line")
"""
for name, typdata in data.items():
create_std_type(net, data=typdata, name=name, element=element, overwrite=overwrite,
check_required=check_required)
def copy_std_types(to_net, from_net, element="line", overwrite=True):
"""
Transfers all standard types of one network to another.
INPUT:
**to_net** - The pandapower network to which the standard types are copied
**from_net** - The pandapower network from which the standard types are taken
**element** - "line" or "trafo"
**overwrite** - if True, overwrites standard types which already exist in to_net
"""
for name, typdata in from_net.std_types[element].items():
create_std_type(to_net, typdata, name, element=element, overwrite=overwrite)
def load_std_type(net, name, element="line"):
"""
    Loads standard type data from the standard type database. Raises a
    UserWarning if the requested type is unknown.
INPUT:
**net** - The pandapower network
**name** - name of the standard type as string
**element** - "line", "trafo" or "trafo3w"
OUTPUT:
**typedata** - dictionary containing type data
"""
library = net.std_types[element]
if name in library:
return library[name]
else:
raise UserWarning("Unknown standard %s type %s" % (element, name))
def std_type_exists(net, name, element="line"):
"""
Checks if a standard type exists.
INPUT:
**net** - pandapower Network
**name** - name of the standard type as string
**element** - type of element ("line" or "trafo")
OUTPUT:
**exists** - True if standard type exists, False otherwise
"""
library = net.std_types[element]
return name in library
def delete_std_type(net, name, element="line"):
"""
Deletes standard type parameters from database.
INPUT:
**net** - pandapower Network
**name** - name of the standard type as string
**element** - type of element ("line" or "trafo")
"""
library = net.std_types[element]
if name in library:
del library[name]
else:
raise UserWarning("Unknown standard %s type %s" % (element, name))
def available_std_types(net, element="line"):
"""
Returns all standard types available for this network as a table.
INPUT:
**net** - pandapower Network
**element** - type of element ("line" or "trafo")
OUTPUT:
**typedata** - table of standard type parameters
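    EXAMPLE:
        >>> import pandapower.networks as pn
        >>> net = pn.simple_mv_open_ring_net()
        >>> available_std_types(net, element="line")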
"""
std_types = pd.DataFrame(net.std_types[element]).T
try:
return std_types.infer_objects()
except AttributeError:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return std_types.convert_objects()
def parameter_from_std_type(net, parameter, element="line", fill=None):
"""
Loads standard types data for a parameter, which can be used to add an additional parameter,
that is not included in the original pandapower datastructure but is available in the standard
type database.
INPUT:
**net** - pandapower network
**parameter** - name of parameter as string
**element** - type of element ("line" or "trafo")
**fill** - fill-value that is assigned to all lines/trafos without
a value for the parameter, either because the line/trafo has no type or because the
type does not have a value for the parameter
EXAMPLE:
import pandapower as pp
import pandapower.networks as pn
net = pn.simple_mv_open_ring_net()
pp.parameter_from_std_type(net, "q_mm2")
"""
if parameter not in net[element]:
net[element][parameter] = fill
for typ in net[element].std_type.unique():
if | pd.isnull(typ) | pandas.isnull |
import datetime
import numpy
from numpy.testing import assert_array_equal
import pandas as pd
import pytest
import ipdb
import alpha_tech_tracker.technical_analysis as ta
from alpha_tech_tracker.wave import Wave
import alpha_tech_tracker.alpaca_engine as data_source
# nice print settings
pd.set_option('display.expand_frame_repr', False)
pd.options.display.max_rows = 999
def test_moving_average():
daily_price = {
"2010-01-01": 10,
"2010-01-02": 20,
"2010-01-03": 30,
"2010-01-04": 10,
"2010-01-05": 10,
"2010-01-06": 40
}
df = pd.DataFrame.from_dict(daily_price, orient='index').sort_index()
ma_df = ta.moving_average(2, df)
mv_key = 'mavg_2'
assert numpy.isnan(ma_df.iloc(0)[0].get(mv_key))
assert ma_df.iloc(0)[1].get(mv_key) == 15
assert ma_df.iloc(0)[5].get(mv_key) == 25
def test_moving_average_summary():
daily_price = {
"2010-01-01": 10,
"2010-01-02": 20,
"2010-01-03": 30,
"2010-01-04": 10,
"2010-01-05": 10,
"2010-01-06": 40
}
df = pd.DataFrame.from_dict(daily_price, orient='index').sort_index()
ma_df = ta.moving_average_summary([2, 3], df)
assert_array_equal(ma_df.columns, ['mavg_2', 'mavg_3'])
assert ma_df.iloc(0)[5].get(0) == 25
assert ma_df.iloc(0)[5].get(1) == 20
def test_detect_moving_average_trend():
df = | pd.read_csv('./tests/data/regn.csv') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# OSeMOSYS-PLEXOS global model: Powerplant data
# Import modules
import pandas as pd
import os
pd.options.mode.chained_assignment = None # default='warn'
import numpy as np
import itertools
from urllib import request
PLEXOS_URL = "https://dataverse.harvard.edu/api/access/datafile/4008393?format=original&gbrecs=true"
PLEXOS_DATA = "PLEXOS_World_2015_Gold_V1.1.xlsx"
MODE_LIST = [1, 2]
def get_data(INPUT_PATH):
# Import data files and user input
# Checks whether PLEXOS-World 2015 data needs to be retrieved from the PLEXOS-World Harvard Dataverse.
path = os.path.join(INPUT_PATH, PLEXOS_DATA)
try:
workbook = open(path, 'rb')
except IOError:
request.urlretrieve(PLEXOS_URL, path)
workbook = open(path, 'rb')
finally:
df = pd.read_excel(workbook, sheet_name="Properties")
df_dict = pd.read_excel(workbook, sheet_name="Memberships")
workbook.close()
df_dict = df_dict[df_dict["parent_class"] == "Generator"].rename(
{"parent_object": "powerplant"}, axis=1
)
return df, df_dict
def create_main_generator_table(df):
# Create main generator table
gen_cols_1 = ["child_class", "child_object", "property", "value"]
df_gen = df[gen_cols_1]
df_gen = df_gen[df_gen["child_class"] == "Generator"]
df_gen.rename(columns={"child_object": "powerplant"}, inplace=True)
df_gen.drop("child_class", axis=1, inplace=True)
df_gen = pd.pivot_table(df_gen,
index="powerplant",
columns="property",
values="value",
aggfunc=np.sum,
fill_value=0,
)
df_gen["total_capacity"] = (df_gen["Max Capacity"].astype(float)) * (
df_gen["Units"].astype(int)
)
gen_cols_2 = ["Commission Date", "Heat Rate", "Max Capacity", "total_capacity"]
df_gen_2 = df_gen[gen_cols_2]
return df_gen_2
def compile_powerplants_nodes_fuels(df_dict):
## Compile dataframe with powerplants, nodes, and fuels
df_dict_fuel = df_dict[df_dict["collection"] == "Fuels"]
df_dict_fuel = df_dict_fuel[["powerplant", "child_object"]]
df_dict_nodes = df_dict[df_dict["collection"] == "Nodes"]
df_dict_nodes = df_dict_nodes[["powerplant", "child_object"]]
df_dict_2 = pd.merge(df_dict_fuel, df_dict_nodes, how="outer", on="powerplant")
return df_dict_2
def calculate_activity_ratios(thermal_fuel_list, region_name, thermal_fuel_list_iar, renewables_list,
df_gen_2, df, model_start_year, model_end_year, df_trn_efficiencies):
# Create master table for activity ratios
years = get_years(model_start_year, model_end_year)
df_ratios = ratio_master_table(df_gen_2, years)
# Calculate Input and OutputActivityRatio for: Power Generation
df_oar, df_oar_final = output_activity_ratios(df_ratios, thermal_fuel_list, region_name)
df_iar_final = input_activity_ratio(df_oar, thermal_fuel_list_iar, renewables_list, df_gen_2, region_name)
# Upstream,
df_oar_upstream = upstream_output_activity_ratios(df_iar_final, renewables_list)
# international markets,
df_oar_int = international_output_activity_ratios(df_oar_upstream)
df_iar_int = international_input_activity_ratio(df_oar_int)
# domestic transmission and
df_iar_trn = domestic_transmission_iar(df_oar_final)
df_oar_trn = domestic_transmission_oar(df_iar_trn)
# international transmission
df_int_trn = create_international_transmission(df, region_name, model_start_year, model_end_year)
df_int_trn_iar = international_transmission_iar(df_int_trn)
df_trn_efficiencies = transmission_efficiency(df_trn_efficiencies)
df_int_trn_oar = international_transmission_oar(df_int_trn, df_trn_efficiencies)
# Combine the pieces from above and output to csv:
df_oar_final = pd.concat([df_oar_final, df_oar_upstream, df_oar_int, df_oar_trn, df_int_trn_oar])
# Select columns for final output table
df_oar_final = df_oar_final.dropna()
df_oar_final = df_oar_final[['REGION', 'TECHNOLOGY', 'FUEL', 'MODE_OF_OPERATION', 'YEAR', 'VALUE']]
df_iar_final = pd.concat([df_iar_final, df_iar_int, df_iar_trn, df_int_trn_iar])
# Select columns for final output table
df_iar_final = df_iar_final.dropna()
df_iar_final = df_iar_final[['REGION', 'TECHNOLOGY', 'FUEL', 'MODE_OF_OPERATION', 'YEAR', 'VALUE']]
return df_oar_final, df_iar_final
def international_transmission_oar(df_int_trn, df_trn_efficiencies):
df_int_trn_oar = df_int_trn.copy()
# OAR Mode 2 is output to first country:
df_int_trn_oar.loc[df_int_trn_oar["MODE_OF_OPERATION"] == 2, "FUEL"] = (
"ELC" + df_int_trn_oar["TECHNOLOGY"].str[3:8] + "01"
)
# OAR Mode 1 is out to the second country:
df_int_trn_oar.loc[df_int_trn_oar["MODE_OF_OPERATION"] == 1, "FUEL"] = (
"ELC" + df_int_trn_oar["TECHNOLOGY"].str[8:13] + "01"
)
# and add values into OAR matrix
df_int_trn_oar = df_int_trn_oar.drop(["VALUE"], axis=1)
df_int_trn_oar = pd.merge(
df_int_trn_oar, df_trn_efficiencies, how="outer", on="TECHNOLOGY"
)
return df_int_trn_oar
def transmission_efficiency(df_trn_efficiencies):
# Drop unneeded columns
df_trn_efficiencies = df_trn_efficiencies.drop(
[
"Line",
"KM distance",
"HVAC/HVDC/Subsea",
"Build Cost ($2010 in $000)",
"Annual FO&M (3.5% of CAPEX) ($2010 in $000)",
"Unnamed: 8",
"Line Max Size (MW)",
"Unnamed: 10",
"Unnamed: 11",
"Unnamed: 12",
"Subsea lines",
],
axis=1,
)
# Drop NaN values
df_trn_efficiencies = df_trn_efficiencies.dropna(subset=["From"])
# Create To and From Codes:
# If from column has length 6 then it's the last three chars plus XX
df_trn_efficiencies.loc[df_trn_efficiencies["From"].str.len() == 6, "From"] = (
df_trn_efficiencies["From"].str[3:6] + "XX"
)
# If from column has length 9 then it's the 3:6 and 7:9 three chars plus XX
df_trn_efficiencies.loc[df_trn_efficiencies["From"].str.len() == 9, "From"] = (
df_trn_efficiencies["From"].str[3:6] + df_trn_efficiencies["From"].str[7:9]
)
# If from column has length 6 then it's the last three chars plus XX
df_trn_efficiencies.loc[df_trn_efficiencies["To"].str.len() == 6, "To"] = (
df_trn_efficiencies["To"].str[3:6] + "XX"
)
# If from column has length 9 then it's the 3:6 and 7:9 three chars plus XX
df_trn_efficiencies.loc[df_trn_efficiencies["To"].str.len() == 9, "To"] = (
df_trn_efficiencies["To"].str[3:6] + df_trn_efficiencies["To"].str[7:9]
)
# Combine From and To columns.
# If the From is earlier in the alphabet the technology is in order, add tech with mode 1.
df_trn_efficiencies["TECHNOLOGY"] = ("TRN" + df_trn_efficiencies["From"] + df_trn_efficiencies["To"])
# Drop to and from columns
df_trn_efficiencies = df_trn_efficiencies.drop(["From", "To"], axis=1)
# Rename column 'VALUES'
df_trn_efficiencies = df_trn_efficiencies.rename(columns={"Losses": "VALUE"})
# And adjust OAR values to be output amounts vs. losses:
df_trn_efficiencies['VALUE'] = 1.0 - df_trn_efficiencies['VALUE']
return df_trn_efficiencies
def international_transmission_iar(df_int_trn):
# Now create the input and output activity ratios
df_int_trn_iar = df_int_trn.copy()
# IAR Mode 1 is input from first country:
df_int_trn_iar.loc[df_int_trn_iar["MODE_OF_OPERATION"] == 1, "FUEL"] = (
"ELC" + df_int_trn_iar["TECHNOLOGY"].str[3:8] + "02"
)
# IAR Mode 2 is input from second country:
df_int_trn_iar.loc[df_int_trn_iar["MODE_OF_OPERATION"] == 2, "FUEL"] = (
"ELC" + df_int_trn_iar["TECHNOLOGY"].str[8:13] + "02"
)
return df_int_trn_iar
def create_international_transmission(df, region_name, model_start_year, model_end_year):
# Build international transmission system from original input data, but for Line rather than Generator:
int_trn_cols = ["child_class", "child_object", "property", "value"]
df_int_trn = df[int_trn_cols]
df_int_trn = df_int_trn[df_int_trn["child_class"] == "Line"]
# For IAR and OAR we can drop the value:
df_int_trn = df_int_trn.drop(["child_class", "value"], axis=1)
# Create MofO column based on property:
df_int_trn["MODE_OF_OPERATION"] = 1
df_int_trn.loc[df_int_trn["property"] == "Min Flow", "MODE_OF_OPERATION"] = 2
# Use the child_object column to build the technology names:
df_int_trn["codes"] = df_int_trn["child_object"].str.split(pat="-")
# If there are only two locations, then the node is XX
df_int_trn.loc[df_int_trn["codes"].str.len() == 2, "TECHNOLOGY"] = (
"TRN" + df_int_trn["codes"].str[0] + "XX" + df_int_trn["codes"].str[1] + "XX"
)
# If there are four locations, the node is already included
df_int_trn.loc[df_int_trn["codes"].str.len() == 4, "TECHNOLOGY"] = (
"TRN"
+ df_int_trn["codes"].str[0]
+ df_int_trn["codes"].str[1]
+ df_int_trn["codes"].str[2]
+ df_int_trn["codes"].str[3]
)
# If there are three items, and the last item is two characters, then the second item is an XX:
df_int_trn.loc[
(df_int_trn["codes"].str.len() == 3) & (df_int_trn["codes"].str[2].str.len() == 2),
"TECHNOLOGY",
] = (
"TRN"
+ df_int_trn["codes"].str[0]
+ "XX"
+ df_int_trn["codes"].str[1]
+ df_int_trn["codes"].str[2]
)
# If there are three items, and the last item is three characters, then the last item is an XX:
df_int_trn.loc[
(df_int_trn["codes"].str.len() == 3) & (df_int_trn["codes"].str[2].str.len() == 3),
"TECHNOLOGY",
] = (
"TRN"
+ df_int_trn["codes"].str[0]
+ df_int_trn["codes"].str[1]
+ df_int_trn["codes"].str[2]
+ "XX"
)
# Set the value (of either IAR or OAR) to 1
df_int_trn["VALUE"] = 1
df_int_trn["REGION"] = region_name
df_int_trn = df_int_trn.drop(["property", "child_object", "codes"], axis=1)
df_int_trn["YEAR"] = model_start_year
# Add in the years:
df_temp = df_int_trn.copy()
for year in range(model_start_year + 1, model_end_year + 1):
df_temp["YEAR"] = year
        df_int_trn = pd.concat([df_int_trn, df_temp])
df_int_trn = df_int_trn.reset_index(drop=True)
return df_int_trn
def domestic_transmission_oar(df_iar_trn):
# OAR for transmission technologies is IAR, but the fuel is 02 instead of 01:
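    # Fuel code convention in this model: ELC<node>01 is electricity at the generation bus
    # (output of the PWR* technologies, input of PWRTRN), while ELC<node>02 is electricity after
    # domestic transmission (output of PWRTRN, input of the international TRN* links).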
df_oar_trn = df_iar_trn.copy()
df_oar_trn["FUEL"] = df_oar_trn["FUEL"].str[0:8] + "02"
return df_oar_trn
def domestic_transmission_iar(df_oar_final):
# Build transmission system outputs
df_iar_trn = df_oar_final.copy()
# Change the technology name to PWRTRNXXXXX
df_iar_trn["TECHNOLOGY"] = "PWRTRN" + df_iar_trn["FUEL"].str[3:8]
# Make all modes of operation 1
df_iar_trn["MODE_OF_OPERATION"] = 1
# And remove all the duplicate entries
df_iar_trn.drop_duplicates(keep="first", inplace=True)
return df_iar_trn
def international_input_activity_ratio(df_oar_int):
# All we need to do is take in the thermal fuels for the MINXXXINT technologies. This already exists as df_oar_int with the XXINT fuel so we can simply copy that:
df_iar_int = df_oar_int.copy()
df_iar_int['FUEL'] = df_iar_int['FUEL'].str[0:3]
return df_iar_int
def international_output_activity_ratios(df_oar_upstream):
# Now we have to create the MINXXXINT technologies. They are all based on the MODE_OF_OPERATION == 2:
df_oar_int = pd.DataFrame(df_oar_upstream.loc[df_oar_upstream['MODE_OF_OPERATION'] == 2, :])
# At this point we should have only the internationally traded fuels since they're all mode 2. So we can make the tech MINXXXINT and that's that.
df_oar_int['TECHNOLOGY'] = 'MIN'+df_oar_int['FUEL']+'INT'
# And rename the fuel to XXXINT
df_oar_int['FUEL'] = df_oar_int['FUEL']+'INT'
df_oar_int['MODE_OF_OPERATION'] = 1 # This is probably not strictly necessary as long as they're always the same in and out...
# and de-duplicate this list:
df_oar_int.drop_duplicates(keep='first',inplace=True)
return df_oar_int
def upstream_output_activity_ratios(df_iar_final, renewables_list):
# #### OutputActivityRatios - Upstream
thermal_fuels = ['COA', 'COG', 'GAS', 'PET', 'URN', 'OIL', 'OTH']
# We have to create a technology to produce every fuel that is input into any of the power technologies:
df_oar_upstream = df_iar_final.copy()
# All mining and resource technologies have an OAR of 1...
df_oar_upstream['VALUE'] = 1
# Renewables - set the technology as RNW + FUEL
df_oar_upstream.loc[df_oar_upstream['FUEL'].str[0:3].isin(renewables_list),
'TECHNOLOGY'] = 'RNW'+df_oar_upstream['FUEL']
# If the fuel is a thermal fuel, we need to create the OAR for the mining technology... BUT NOT FOR THE INT FUELS...
df_oar_upstream.loc[df_oar_upstream['FUEL'].str[0:3].isin(thermal_fuels) & ~(df_oar_upstream['FUEL'].str[3:6] == "INT"),
'TECHNOLOGY'] = 'MIN'+df_oar_upstream['FUEL']
# Above should get all the outputs for the MIN technologies, but we need to adjust the mode 2 ones to just the fuel code (rather than MINCOAINT)
df_oar_upstream.loc[df_oar_upstream['MODE_OF_OPERATION']==2,
'TECHNOLOGY'] = 'MIN'+df_oar_upstream['FUEL'].str[0:3]+df_oar_upstream['TECHNOLOGY'].str[6:9]
df_oar_upstream.loc[df_oar_upstream['MODE_OF_OPERATION']==2,
'FUEL'] = df_oar_upstream['FUEL'].str[0:3]
# Now remove the duplicate fuels that the above created (because there's now a COA for each country, not each region, and GAS is repeated twice for each region as well):
df_oar_upstream.drop_duplicates(keep='first',inplace=True)
return df_oar_upstream
def input_activity_ratio(df_oar, thermal_fuel_list_iar, renewables_list, df_gen_2, region_name):
# #### InputActivityRatio - Power Generation Technologies
# Copy OAR table with all columns to IAR
df_iar = df_oar.copy()
df_iar['FUEL'] = 0
# Deal with GAS techs first... OCG and CCG
# OCG Mode 1: Domestic GAS
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 1) &
(df_iar['TECHNOLOGY'].str[3:6].isin(['OCG'])),
'FUEL'] = 'GAS'+df_iar['TECHNOLOGY'].str[6:9]
# OCG Mode 2: International GAS
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 2) &
(df_iar['TECHNOLOGY'].str[3:6].isin(['OCG'])),
'FUEL'] = 'GASINT'
# CCG Mode 1: Domestic GAS
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 1) &
(df_iar['TECHNOLOGY'].str[3:6].isin(['CCG'])),
'FUEL'] = 'GAS'+df_iar['TECHNOLOGY'].str[6:9]
# CCG Mode 2: International GAS
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 2) &
(df_iar['TECHNOLOGY'].str[3:6].isin(['CCG'])),
'FUEL'] = 'GASINT'
# For non-GAS thermal fuels, domestic fuel input by country in mode 1 and
# 'international' fuel input in mode 2
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 1) &
(df_iar['TECHNOLOGY'].str[3:6].isin(thermal_fuel_list_iar)),
'FUEL'] = df_iar['TECHNOLOGY'].str[3:9]
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 2) &
(df_iar['TECHNOLOGY'].str[3:6].isin(thermal_fuel_list_iar)),
'FUEL'] = df_iar['TECHNOLOGY'].str[3:6] + 'INT'
# For renewable fuels, input by node in mode 1
df_iar.loc[(df_iar['MODE_OF_OPERATION'] == 1) &
(df_iar['TECHNOLOGY'].str[3:6].isin(renewables_list)),
'FUEL'] = df_iar['TECHNOLOGY'].str[3:11]
# Remove mode 2 when not used
df_iar = df_iar.loc[df_iar['FUEL'] != 0]
# ### Calculate average InputActivityRatio by node+technology and only by technology
df_eff_node, df_eff_tech = average_inputactivityratio_by_node_tech(df_gen_2)
# Join efficiency columns: one with node and technology average, and the
# other with technology average
df_iar = df_iar.join(df_eff_node.set_index(['tech_code', 'node_code']),
on=['tech_code', 'node_code'])
df_iar = df_iar.join(df_eff_tech.set_index('tech_code'),
on='tech_code')
# When available, choose node and technology average. Else,
# choose technology average
df_iar['VALUE'] = df_iar['node_average_iar']
df_iar.loc[df_iar['VALUE'].isna(),
'VALUE'] = df_iar['tech_average_iar']
# Add 'REGION' column and fill 'GLOBAL' throughout
df_iar['REGION'] = region_name
# Select columns for final output table
df_iar_final = df_iar[['REGION',
'TECHNOLOGY',
'FUEL',
'MODE_OF_OPERATION',
'YEAR',
'VALUE',]]
# Don't write this yet - we'll write both IAR and OAR at the end...
# df_iar_final.to_csv(r"output/InputActivityRatio.csv", index = None)
return df_iar_final
def output_activity_ratios(df_ratios, thermal_fuel_list, region_name):
"""OutputActivityRatio - Power Generation Technologies
"""
df_oar = df_ratios.copy()
mask = df_oar['TECHNOLOGY'].apply(lambda x: x[3:6] in thermal_fuel_list)
df_oar['FUEL'] = 0
df_oar['FUEL'][mask] = 1
df_oar = df_oar.loc[~((df_oar['MODE_OF_OPERATION'] > 1) &
(df_oar['FUEL'] == 0))]
df_oar['FUEL'] = ('ELC' +
df_oar['TECHNOLOGY'].str[6:11] +
'01'
)
df_oar['VALUE'] = 1
# Add 'REGION' column and fill 'GLOBAL' throughout
df_oar['REGION'] = region_name
# Select columns for final output table
df_oar_final = df_oar[['REGION',
'TECHNOLOGY',
'FUEL',
'MODE_OF_OPERATION',
'YEAR',
'VALUE',]]
return df_oar, df_oar_final
def create_generators(df, df_dict, model_start_year, df_op_life, df_tech_code):
df_gen_2 = create_main_generator_table(df)
df_dict_2 = compile_powerplants_nodes_fuels(df_dict)
## Merge original generator dataframe with nodes and fuels
df_gen_2 = pd.merge(df_gen_2, df_dict_2, how="outer", on="powerplant")
df_gen_2.rename(
{"child_object_x": "fuel", "child_object_y": "node"}, axis=1, inplace=True
)
## Extract start year from Commission Date
df_gen_2["Commission Date"] = pd.to_datetime(df_gen_2["Commission Date"])
df_gen_2["start_year"] = df_gen_2["Commission Date"].dt.year
df_gen_2.drop("Commission Date", axis=1, inplace=True)
## Calculate efficiency from heat rate. Units of heat rate in MJ/kWh
df_gen_2["efficiency"] = 3.6 / df_gen_2["Heat Rate"].astype(float)
df_gen_2.drop("Heat Rate", axis=1, inplace=True)
    ## Calculate years of operation from start year until 2015
df_gen_2["years_of_operation"] = model_start_year - df_gen_2["start_year"]
## Fix blank spaces in 'fuels' columns. Appearing for 'Oil' powerplants in certain countries
df_gen_2.loc[df_gen_2["fuel"].isna(), "fuel"] = (
df_gen_2["node"].str.split("-").str[:2].str.join("-")
+ " "
+ df_gen_2["powerplant"].str.split("_", expand=True)[1]
)
## Create column for technology
df_gen_2["technology"] = df_gen_2["powerplant"].str.split("_").str[1]
df_gen_2["technology"] = df_gen_2["technology"].str.title()
## Divide Gas into CCGT and OCGT based on max capacity
df_gen_2.loc[
(df_gen_2["technology"] == "Gas") & (df_gen_2["Max Capacity"].astype(float) > 130),
"technology",
] = "Gas-CCGT"
df_gen_2.loc[
(df_gen_2["technology"] == "Gas") & (df_gen_2["Max Capacity"].astype(float) <= 130),
"technology",
] = "Gas-OCGT"
# Add region and country code columns
df_gen_2['region_code'] = df_gen_2['node'].str[:2]
df_gen_2['country_code'] = df_gen_2['node'].str[3:]
# ### Add operational life column
op_life_dict = dict(zip(list(df_op_life['tech']),
list(df_op_life['years'])))
df_gen_2['operational_life'] = df_gen_2['technology'].map(op_life_dict)
df_gen_2['retirement_year_data'] = (df_gen_2['operational_life']
+ df_gen_2['start_year'])
df_gen_2['retirement_diff'] = ((df_gen_2['years_of_operation']
- df_gen_2['operational_life'])/
df_gen_2['operational_life'])
    ''' Set retirement year based on years of operation.
        If a plant has already exceeded its operational life by 50% or more, retire it in 2025;
        if it has exceeded it by less than 50%, retire it in 2030;
        otherwise keep the retirement year implied by the data.
    '''
df_gen_2.loc[df_gen_2['retirement_diff'] >= 0.5,
'retirement_year_model'] = 2025
df_gen_2.loc[(df_gen_2['retirement_diff'] < 0.5) &
(df_gen_2['retirement_diff'] > 0),
'retirement_year_model'] = 2030
df_gen_2.loc[df_gen_2['retirement_diff'] <= 0,
'retirement_year_model'] = df_gen_2['retirement_year_data']
# ### Add naming convention
tech_code_dict = dict(zip(list(df_tech_code['tech']),
list(df_tech_code['code'])))
df_gen_2['tech_code'] = df_gen_2['technology'].map(tech_code_dict)
df_gen_2.loc[df_gen_2['node'].str.len() <= 6,
'node_code'] = (df_gen_2['node'].
str.split('-').
str[1:].
str.join("") +
'XX')
df_gen_2.loc[df_gen_2['node'].str.len() > 6,
'node_code'] = (df_gen_2['node'].
str.split('-').
str[1:].
str.join("")
)
df_gen_2 = df_gen_2.loc[~df_gen_2['tech_code'].isna()]
return df_gen_2
def residual_capacity(df_gen_2, model_start_year, model_end_year, region_name):
"""Calculate residual capacity"""
res_cap_cols = [
"node_code",
"tech_code",
"total_capacity",
"start_year",
"retirement_year_model",
]
df_res_cap = df_gen_2[res_cap_cols]
for each_year in range(model_start_year, model_end_year+1):
df_res_cap[str(each_year)] = 0
df_res_cap = pd.melt(
df_res_cap,
id_vars=res_cap_cols,
value_vars=[x for x in df_res_cap.columns if x not in res_cap_cols],
var_name="model_year",
value_name="value",
)
df_res_cap["model_year"] = df_res_cap["model_year"].astype(int)
df_res_cap.loc[
(df_res_cap["model_year"] >= df_res_cap["start_year"])
& (df_res_cap["model_year"] <= df_res_cap["retirement_year_model"]),
"value",
] = df_res_cap["total_capacity"]
df_res_cap = df_res_cap.groupby(
["node_code", "tech_code", "model_year"], as_index=False
)["value"].sum()
# Add column with naming convention
df_res_cap['node_code'] = df_res_cap['node_code']
df_res_cap['tech'] = ('PWR' +
df_res_cap['tech_code'] +
df_res_cap['node_code'] + '01'
)
# Convert total capacity from MW to GW
df_res_cap['value'] = df_res_cap['value'].div(1000)
df_res_cap_plot = df_res_cap[['node_code',
'tech_code',
'model_year',
'value']]
    # Rename columns to OSeMOSYS set names: 'tech' -> 'TECHNOLOGY', 'model_year' -> 'YEAR', 'value' -> 'VALUE'
df_res_cap.rename({'tech':'TECHNOLOGY',
'model_year':'YEAR',
'value':'VALUE'},
inplace = True,
axis=1)
# Drop 'tech_code' and 'node_code'
df_res_cap.drop(['tech_code', 'node_code'], inplace = True, axis=1)
# Add 'REGION' column and fill 'GLOBAL' throughout
df_res_cap['REGION'] = region_name
#Reorder columns
df_res_cap = df_res_cap[['REGION', 'TECHNOLOGY', 'YEAR', 'VALUE']]
return df_res_cap
def average_inputactivityratio_by_node_tech(df_gen_2):
"""Calculate average InputActivityRatio by node+technology and only by technology
"""
df_eff = df_gen_2[['node_code',
'efficiency',
'tech_code']]
# Average efficiency by node and technology
df_eff_node = df_eff.groupby(['tech_code',
'node_code'],
as_index = False).agg('mean')
df_eff_node['node_average_iar'] = ((1 / df_eff_node['efficiency']).
round(2))
df_eff_node.drop('efficiency',
axis = 1,
inplace = True)
# Average efficiency by technology
df_eff_tech = df_eff.groupby('tech_code',
as_index = False).agg('mean')
df_eff_tech['tech_average_iar'] = ((1 / df_eff_tech['efficiency']).
round(2))
df_eff_tech.drop('efficiency',
axis = 1,
inplace = True)
return df_eff_node, df_eff_tech
def final_costs(each_cost, df_costs, df_oar_final, weo_regions_dict):
df_costs_temp = df_costs.loc[df_costs['parameter'].str.contains(each_cost)]
df_costs_temp.drop(['technology', 'parameter'],
axis = 1,
inplace = True)
df_costs_final = df_oar_final[['REGION',
'TECHNOLOGY',
'YEAR'
]]
df_costs_final['YEAR'] = df_costs_final['YEAR'].astype(int)
df_costs_final = df_costs_final.drop_duplicates()
df_costs_final = (df_costs_final
.loc[(df_costs_final['TECHNOLOGY']
.str.startswith('PWR')
) &
(~df_costs_final['TECHNOLOGY']
.str.contains('TRN')
)
]
)
df_costs_final['technology_code'] = df_costs_final['TECHNOLOGY'].str[3:6]
df_costs_final['weo_region'] = df_costs_final['TECHNOLOGY'].str[6:9]
df_costs_final['weo_region'] = (df_costs_final['weo_region']
.replace(weo_regions_dict))
df_costs_final = pd.merge(df_costs_final,
df_costs_temp,
on = ['technology_code', 'weo_region', 'YEAR'],
how = 'left'
)
df_costs_final.drop(['technology_code', 'weo_region'],
axis = 1,
inplace = True)
df_costs_final = df_costs_final.fillna(-9)
df_costs_final = pd.pivot_table(df_costs_final,
index = ['REGION', 'YEAR'],
columns = 'TECHNOLOGY',
values = 'value').reset_index()
df_costs_final = df_costs_final.replace([-9],[np.nan])
#df_costs_final.set_index(['REGION', 'YEAR'],
# inplace = True)
df_costs_final = df_costs_final.interpolate(method = 'linear',
limit_direction='forward').round(2)
df_costs_final = df_costs_final.interpolate(method = 'linear',
limit_direction='backward').round(2)
df_costs_final = pd.melt(df_costs_final,
id_vars = ['REGION', 'YEAR'],
value_vars = [x for x in df_costs_final.columns
if x not in ['REGION', 'YEAR']
],
var_name = 'TECHNOLOGY',
value_name = 'VALUE'
)
df_costs_final = df_costs_final[['REGION', 'TECHNOLOGY', 'YEAR', 'VALUE']]
df_costs_final = df_costs_final[~df_costs_final['VALUE'].isnull()]
return df_costs_final
def create_weo_region_mapping(df_weo_regions):
weo_regions_dict = dict([(k, v)
for k, v
in zip(df_weo_regions['technology_code'],
df_weo_regions['weo_region']
)
]
)
return weo_regions_dict
def capital_fixed_var_costs(df_weo_data):
# ### Costs: Capital, fixed, and variable
df_costs = pd.melt(df_weo_data,
id_vars = ['technology', 'weo_region', 'parameter'],
value_vars = ['2017', '2030', '2040'],
var_name = ['YEAR'])
df_costs['parameter'] = df_costs['parameter'].str.split('\r\n').str[0]
df_costs['value'] = df_costs['value'].replace({'n.a.':0})
df_costs['value'] = df_costs['value'].astype(float)
df_costs = df_costs.pivot_table(index = ['technology', 'parameter', 'YEAR'],
columns = 'weo_region',
values = 'value').reset_index()
df_costs['AS_average'] = (df_costs['China'] +
df_costs['India'] +
df_costs['Japan'] +
df_costs['Middle East']).div(4)
df_costs['NA_average'] = (df_costs['United States'])
df_costs['SA_average'] = (df_costs['Brazil'])
df_costs['Global_average'] = (df_costs['Africa'] +
df_costs['Brazil'] +
df_costs['Europe'] +
df_costs['China'] +
df_costs['India'] +
df_costs['Japan'] +
df_costs['Middle East'] +
df_costs['Russia'] +
df_costs['United States']).div(9)
df_costs = pd.melt(df_costs,
id_vars = ['technology', 'parameter', 'YEAR'],
value_vars = [x
for x
in df_costs.columns
if x not in ['technology', 'parameter', 'YEAR']
]
)
df_costs['YEAR'] = df_costs['YEAR'].astype(int)
costs_dict = {'Biomass - waste incineration - CHP':'WAS',
'Biomass Power plant':'BIO',
'CCGT':'CCG',
'CCGT - CHP':'COG',
'Concentrating solar power':'CSP',
'Gas turbine':'OCG',
'Geothermal':'GEO',
'Hydropower - large-scale':'HYD',
'Marine':'WAV',
'Nuclear':'URN',
'Solar photovoltaics - Large scale':'SPV',
'Steam Coal - SUBCRITICAL':'COA',
'Steam Coal - SUPERCRITICAL':'COA',
'Steam Coal - ULTRASUPERCRITICAL':'COA',
'Wind onshore':'WON'} # Missing OIL, OTH, PET, WOF
df_costs = df_costs.loc[df_costs['technology'].isin(costs_dict.keys())]
df_costs['technology_code'] = df_costs['technology'].replace(costs_dict)
return df_costs
def ratio_master_table(df_gen_2, years):
# Create master table for activity ratios
node_list = list(df_gen_2['node_code'].unique())
# Add extra nodes which are not present in 2015 but will be by 2050
nodes_extra_list = ['AF-SOM', 'AF-TCD', 'AS-TLS', 'EU-MLT', 'NA-BLZ', 'NA-HTI', 'SA-BRA-J1', 'SA-BRA-J2', 'SA-BRA-J3', 'SA-SUR']
for each_node in nodes_extra_list:
if len(each_node) <= 6:
node_list.append("".join(each_node.split('-')[1:]) + 'XX')
else:
node_list.append("".join(each_node.split('-')[1:]))
master_fuel_list = list(df_gen_2['tech_code'].unique())
df_ratios = pd.DataFrame(list(itertools.product(node_list,
master_fuel_list,
MODE_LIST,
years)
),
columns=['node_code', 'tech_code', 'MODE_OF_OPERATION', 'YEAR']
)
df_ratios['TECHNOLOGY'] = ('PWR' +
df_ratios['tech_code'] +
df_ratios['node_code'] + '01'
)
return df_ratios
def get_years(model_start_year, model_end_year):
return list(range(model_start_year,
model_end_year + 1))
def main(INPUT_PATH, OUTPUT_PATH, model_start_year=2015, model_end_year=2050, region_name='GLOBAL'):
df, df_dict = get_data(INPUT_PATH)
df_weo_data = pd.read_csv(os.path.join(INPUT_PATH, "weo_2018_powerplant_costs.csv"))
df_op_life = pd.read_csv(os.path.join(INPUT_PATH, "operational_life.csv"))
df_tech_code = pd.read_csv(os.path.join(INPUT_PATH, "naming_convention_tech.csv"))
df_trn_efficiencies = pd.read_excel(os.path.join(INPUT_PATH, "Costs Line expansion.xlsx"))
df_weo_regions = pd.read_csv(os.path.join(INPUT_PATH, "weo_region_mapping.csv"))
emissions = []
# Create 'output' directory if it doesn't exist
if not os.path.exists(OUTPUT_PATH):
os.makedirs(OUTPUT_PATH)
df_gen_2 = create_generators(df, df_dict, model_start_year, df_op_life, df_tech_code)
df_res_cap = residual_capacity(df_gen_2, model_start_year, model_end_year, region_name)
filepath = os.path.join(OUTPUT_PATH, 'ResidualCapacity.csv')
df_res_cap.to_csv(filepath, index=None)
# ### Add input and output activity ratios
thermal_fuel_list = ['COA', 'COG', 'OCG', 'CCG', 'PET', 'URN', 'OIL', 'OTH']
thermal_fuel_list_iar = ['COA', 'COG', 'PET', 'URN', 'OIL', 'OTH']
renewables_list = ['BIO', 'GEO', 'HYD', 'SPV', 'CSP', 'WAS', 'WAV', 'WON', 'WOF']
# Calculate Input and OutputActivityRatio for: Power Generation
df_oar_final, df_iar_final = calculate_activity_ratios(thermal_fuel_list, region_name,
thermal_fuel_list_iar, renewables_list,
df_gen_2, df, model_start_year,
model_end_year, df_trn_efficiencies)
filepath = os.path.join(OUTPUT_PATH, "OutputActivityRatio.csv")
df_oar_final.to_csv(filepath, index=None)
filepath = os.path.join(OUTPUT_PATH, "InputActivityRatio.csv")
df_iar_final.to_csv(filepath, index=None)
# ### Costs: Capital, fixed, and variable
df_costs = capital_fixed_var_costs(df_weo_data)
weo_regions_dict = create_weo_region_mapping(df_weo_regions)
capex = final_costs("Capital", df_costs, df_oar_final, weo_regions_dict)
capex.to_csv(os.path.join(OUTPUT_PATH, 'CapitalCost.csv'), index=None)
fixed = final_costs("O&M", df_costs, df_oar_final, weo_regions_dict)
fixed.to_csv(os.path.join(OUTPUT_PATH, 'FixedCost.csv'), index=None)
# ## Create sets for TECHNOLOGIES, FUELS
def create_sets(x: str) -> None:
set_elements = list(df_iar_final[x].unique()) + list(df_oar_final[x].unique())
set_elements = list(set(set_elements))
set_elements.sort()
set_elements_df = pd.DataFrame(set_elements, columns=['VALUE'])
return set_elements_df.to_csv(os.path.join(OUTPUT_PATH, str(x) + '.csv'),
index=None
)
create_sets('TECHNOLOGY')
create_sets('FUEL')
# ## Create set for YEAR, REGION, MODE_OF_OPERATION
years = get_years(model_start_year, model_end_year)
years_df = | pd.DataFrame(years, columns=['VALUE']) | pandas.DataFrame |
import argparse
import json
import logging
import sys
from collections import Counter
from json import JSONDecodeError
from pathlib import Path
from typing import Dict, List, Optional
sys.path.append('')
sys.path.append('../../..')
import pandas as pd
from pandarallel import pandarallel
from hyperstyle.src.python.review.application_config import LanguageVersion
from hyperstyle.src.python.review.common.file_system import Extension, get_total_code_lines_from_code
from hyperstyle.src.python.review.common.language import Language
from hyperstyle.src.python.review.inspectors.issue import BaseIssue, ISSUE_TYPE_TO_CLASS, IssueType, Measurable
from hyperstyle.src.python.review.quality.rules.code_style_scoring import CodeStyleRule
from hyperstyle.src.python.review.quality.rules.line_len_scoring import LineLengthRule
from hyperstyle.src.python.review.reviewers.utils.code_statistics import get_code_style_lines
from analysis.src.python.evaluation.common.pandas_util import get_solutions_df_by_file_path, write_df_to_file
from analysis.src.python.evaluation.common.csv_util import ColumnName
from analysis.src.python.evaluation.common.file_util import get_parent_folder
from analysis.src.python.evaluation.issues_statistics.common.raw_issue_encoder_decoder import RawIssueDecoder
from analysis.src.python.evaluation.issues_statistics.get_raw_issues import RAW_ISSUES
ID = ColumnName.ID.value
LANG = ColumnName.LANG.value
CODE = ColumnName.CODE.value
CODE_STYLE_LINES = f'{IssueType.CODE_STYLE.value}_lines'
CODE_STYLE_RATIO = f'{IssueType.CODE_STYLE.value}_ratio'
LINE_LEN_NUMBER = f'{IssueType.LINE_LEN.value}_number'
LINE_LEN_RATIO = f'{IssueType.LINE_LEN.value}_ratio'
TOTAL_LINES = 'total_lines'
VALUE = 'value'
OUTPUT_DF_NAME = 'stats'
DEFAULT_OUTPUT_FOLDER_NAME = 'raw_issues_statistics'
logger = logging.getLogger(__name__)
def configure_arguments(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'solutions_with_raw_issues',
type=lambda value: Path(value).absolute(),
help=f'Local XLSX-file or CSV-file path. Your file must include column-names: '
f'"{ID}", "{CODE}", "{LANG}", and "{RAW_ISSUES}".',
)
parser.add_argument(
'-o', '--output',
type=lambda value: Path(value).absolute(),
help='Path to the folder where datasets with statistics will be saved. '
'If not specified, the datasets will be saved in the folder next to the original one.',
)
parser.add_argument(
'-l', '--log-output',
type=lambda value: Path(value).absolute(),
help='Path where logs will be stored. If not specified, then logs will be output to stderr.',
)
def _convert_language_code_to_language(fragment_id: str, language_code: str) -> str:
language_version = LanguageVersion.from_value(language_code)
if language_version is None:
logger.warning(f'{fragment_id}: it was not possible to determine the language version from "{language_code}".')
return language_code
language = Language.from_language_version(language_version)
if language == Language.UNKNOWN:
logger.warning(f'{fragment_id}: it was not possible to determine the language from "{language_version}".')
return language_code
return language.value
def _extract_stats_from_issues(row: pd.Series) -> pd.Series:
print(f'{row[ID]}: extracting stats.')
if pd.isnull(row[CODE]):
logger.warning(f'{row[ID]}: no code.')
row[CODE] = ""
if pd.isnull(row[LANG]):
logger.warning(f'{row[ID]}: no lang.')
row[LANG] = ""
try:
issues: List[BaseIssue] = json.loads(row[RAW_ISSUES], cls=RawIssueDecoder)
except (JSONDecodeError, TypeError):
logger.warning(f'{row[ID]}: failed to decode issues.')
issues: List[BaseIssue] = []
counter = Counter([issue.type for issue in issues])
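    # For Measurable issue types the per-issue measures are stored as a list (exploded later when
    # grouping by language); for all other types only the number of occurrences is stored.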
for issue_type, issue_class in ISSUE_TYPE_TO_CLASS.items():
if issubclass(issue_class, Measurable):
row[issue_type.value] = [issue.measure() for issue in issues if isinstance(issue, issue_class)]
else:
row[issue_type.value] = counter[issue_type]
row[CODE_STYLE_LINES] = get_code_style_lines(issues)
row[LINE_LEN_NUMBER] = counter[IssueType.LINE_LEN]
row[TOTAL_LINES] = get_total_code_lines_from_code(row[CODE])
row[LANG] = _convert_language_code_to_language(row[ID], row[LANG])
print(f'{row[ID]}: extraction of statistics is complete.')
return row
def _convert_ratio_to_int(ratio: float):
"""
Round the ratio to 2 decimal places, multiply by 100, and take the integer part.
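    For example, a ratio of 0.25 becomes 25.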
"""
return int((round(ratio, 2) * 100))
def _group_stats_by_lang(df_with_stats: pd.DataFrame) -> Dict[str, pd.DataFrame]:
logger.info('The grouping of statistics by language has started.')
result = {}
df_grouped_by_lang = df_with_stats.groupby(LANG)
for lang in df_grouped_by_lang.groups:
logger.info(f'"{lang}" statistics grouping started.')
lang_group = df_grouped_by_lang.get_group(lang)
columns_with_stats = []
for issue_type, issue_class in ISSUE_TYPE_TO_CLASS.items():
column = lang_group[issue_type.value]
if issubclass(issue_class, Measurable):
column = column.explode()
columns_with_stats.append(column.value_counts())
columns_with_stats.append(lang_group[TOTAL_LINES].value_counts())
line_len_ratio_column = lang_group.apply(
lambda row: LineLengthRule.get_ratio(row[LINE_LEN_NUMBER], row[TOTAL_LINES]),
axis=1,
)
line_len_ratio_column = line_len_ratio_column.apply(_convert_ratio_to_int)
line_len_ratio_column.name = LINE_LEN_RATIO
columns_with_stats.append(line_len_ratio_column.value_counts())
code_style_ratio_column = lang_group.apply(
lambda row: CodeStyleRule.get_ratio(
row[CODE_STYLE_LINES], row[TOTAL_LINES], Language.from_value(str(lang), default=Language.UNKNOWN),
),
axis=1,
)
code_style_ratio_column = code_style_ratio_column.apply(_convert_ratio_to_int)
code_style_ratio_column.name = CODE_STYLE_RATIO
columns_with_stats.append(code_style_ratio_column.value_counts())
        stats = pd.concat(columns_with_stats, axis=1)
import os
import pandas as pd
import pickle
import numpy as np
import lightgbm as lgb
import xgboost as xgb
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVR
from sklearn.model_selection import KFold
from openpyxl import load_workbook
from config import parse_args
from src.log import Logger
from src.utils import Jaccard,Cosine,Peason
def save_args(args, logger):
save_args_file = os.path.join(args.root_path, "args.txt")
line = str(args)
with open(save_args_file, mode="w", encoding="utf-8") as wfp:
wfp.write(line + "\n")
logger.info("Args saved in file%s" % save_args_file)
def check_path(args):
assert os.path.exists("./data")
if not os.path.exists(args.log_path):
os.mkdir(args.log_path)
if not os.path.exists(args.processed_path):
os.mkdir(args.processed_path)
def xgb_train(args):
root_path = os.path.join(args.log_path,"third")
# dataset preparing
# determine inputs dtype
value_mol_file = os.path.join(args.raw_path, "Molecular_Descriptor.xlsx")
admet_file = os.path.join(args.raw_path, "ADMET.xlsx")
admet_mat_train = pd.read_excel(admet_file, sheet_name="training")
admet_mat_test = pd.read_excel(admet_file, sheet_name="test")
admet_mat_test_ext = admet_mat_test.copy()
x_values = pd.read_excel(value_mol_file, sheet_name="training")
x_values_test = pd.read_excel(value_mol_file, sheet_name="test")
names_list = admet_mat_train.columns.values[1:]
all_result = [["names", "Pearson", "Jaccard", "Cosine"]]
# booster:
params = {'booster': 'gbtree',
'objective': 'binary:logistic',
'eval_metric': 'auc',
'max_depth': 10,
'lambda': 10,
'subsample': 0.75,
'colsample_bytree': 0.75,
'min_child_weight': 1,
'eta': 0.025,
'seed': 0,
'nthread': 8,
'silent': 1,
'gamma': 0.25,
'learning_rate': 0.2}
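    # Note: 'learning_rate' is an alias for 'eta' in XGBoost, so only one of the two values above takes effect.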
for raw_name in names_list:
y_values = admet_mat_train[raw_name]
length = len(x_values)
train_len = int(0.75 * length)
train_x = x_values.iloc[:train_len, 1:]
value_names = train_x.columns.values
validate_x = x_values.iloc[train_len:, 1:].values
train_y = y_values.iloc[:train_len].values
train_y = train_y.reshape(train_len, 1)
validate_y = y_values.iloc[train_len:].values
validate_y = validate_y.reshape(length - train_len, 1)
        # Algorithm parameters
dtrain = xgb.DMatrix(train_x.values, label=train_y)
dvalidate = xgb.DMatrix(validate_x, label=validate_y)
watchlist = [(dvalidate, 'val')]
        # Build the model and predict: num_boost_round (the number of boosting iterations) equals the number of trees
bst_model = xgb.train(params, dtrain, num_boost_round=50, evals=watchlist)
        # Predict on the test set
test_x = x_values_test.iloc[:, 1:].values
dtest = xgb.DMatrix(test_x)
dvalidate = xgb.DMatrix(validate_x)
test_y = bst_model.predict(dtest)
predict_y = bst_model.predict(dvalidate)
validate_y = validate_y.reshape(length - train_len)
pea = Peason(predict_y, validate_y)
jac = Jaccard(predict_y, validate_y)
cos = Cosine(predict_y, validate_y)
all_result.append([raw_name, pea, jac, cos])
length = len(test_y)
admet_mat_test[raw_name] = test_y.reshape(length, 1)
admet_mat_test_ext[raw_name] = np.round(test_y.reshape(length, 1))
xgb.plot_importance(bst_model,max_num_features=20)
save_fig_result = os.path.join(root_path, "xgb_features_importance_" + raw_name + ".png")
'''
save_xlsx_result = os.path.join(root_path, "xgb_features_importance_" + raw_name + ".xlsx")
output = bst_model.feature_importances_
importance_list = ["names", "importance"]
for k in range(len(value_names)):
importance_list.append([value_names[k], output[k]])
pd.DataFrame(importance_list).to_excel(save_xlsx_result, index=None)
'''
plt.savefig(save_fig_result)
plt.close()
save_test_result = os.path.join(root_path, "ADMET_xgb_result.xlsx")
admet_mat_test.to_excel(save_test_result, index=None)
save_test_result = os.path.join(root_path, "ADMET_xgb_result_binary.xlsx")
admet_mat_test_ext.to_excel(save_test_result, index=None)
save_test_result = os.path.join(root_path, "xgb_result.xlsx")
pd.DataFrame(all_result).to_excel(save_test_result, index=None)
save_test_params = os.path.join(root_path, "ADMET_xgb_params.txt")
with open(save_test_params, encoding="utf8", mode="w") as wfp:
wfp.write(str(params))
def lgb_train(args):
# dataset preparing
# determine inputs dtype
value_mol_file = os.path.join(args.raw_path, "Molecular_Descriptor.xlsx")
root_path = os.path.join(args.log_path, "third")
admet_file = os.path.join(args.raw_path, "ADMET.xlsx")
admet_mat_train = pd.read_excel(admet_file,sheet_name="training")
admet_mat_test = pd.read_excel(admet_file,sheet_name="test")
admet_mat_test_ext = admet_mat_test.copy()
    x_values = pd.read_excel(value_mol_file,sheet_name="training")
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
def main():
try:
df_train = pd.read_csv(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.data', header=None)
df_test = pd.read_csv(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.test',
skiprows=[0], header=None)
    except Exception:  # fall back to local copies if the download fails
        df_train = pd.read_csv('adult.data', header=None)
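        # A local fallback for the test split would presumably mirror the remote read, e.g.:
        # df_test = pd.read_csv('adult.test', skiprows=[0], header=None)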
import pickle
# libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
makerList = ['o', '*', '+', 'D', 's', '-']
#labelList= ["greedy", "random", "e-greedy", "boltzmann", "bayesian"]
labelList = ["vallina actor-critic", "active actor-critic",
"e-greedy", "boltzmann", "bayesian"]
# load the original
with open('ac_orig_cartpole1.pickle', 'rb') as fp:
[a1, b1, lens1, rewards1] = pickle.load(fp)
with open('ac_active_cartpole.pickle', 'rb') as fp:
[a2, b2, lens2, rewards2] = pickle.load(fp)
smoothing_window = 10
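# Episode rewards are smoothed with a rolling mean over `smoothing_window` episodes before plotting.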
# plot how the episode length changes over time
fig1 = plt.figure(figsize=(10, 5))
i = 0
x = range(len(lens1))
plt.plot(x, lens1, marker=makerList[i],
label=labelList[i]) # plotting by columns
i = i + 1
x = range(len(lens1))
plt.plot(x, lens2, marker=makerList[i],
label=labelList[i]) # plotting by columns
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
ax = plt.gca()
ax.legend(loc='best')
plt.show()
# plot how the episode reward changes over time
fig2 = plt.figure(figsize=(10, 5))
i = 0
x = range(len(rewards1))
rewards_smoothed = pd.Series(rewards1).rolling(
smoothing_window, min_periods=smoothing_window).mean()
x = range(len(rewards_smoothed))
# plotting by columns
plt.plot(x, rewards_smoothed, marker=makerList[i], label=labelList[i])
i = i + 1
rewards_smoothed = pd.Series(rewards2)
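# Presumably the second reward curve is smoothed and plotted like the first, e.g.:
# rewards_smoothed = rewards_smoothed.rolling(smoothing_window, min_periods=smoothing_window).mean()
# plt.plot(range(len(rewards_smoothed)), rewards_smoothed, marker=makerList[i], label=labelList[i])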
# -*- coding: utf-8 -*-
"""
Pipeline between reV and RPM
"""
from concurrent.futures import as_completed
import logging
import os
import pandas as pd
import psutil
from warnings import warn
from reVX.handlers.outputs import Outputs
from reVX.rpm.rpm_clusters import RPMClusters
from reVX.rpm.rpm_output import RPMOutput
from reVX.utilities.exceptions import RPMValueError, RPMRuntimeError
from reVX.utilities.utilities import log_versions
from rex.utilities.execution import SpawnProcessPool
logger = logging.getLogger(__name__)
class RPMClusterManager:
"""
RPM Cluster Manager:
- Extracts gids for all RPM regions
- Runs RPMClusters in parallel for all regions
- Save results to disk
"""
def __init__(self, cf_fpath, rpm_meta, rpm_region_col=None,
max_workers=None):
"""
Parameters
----------
cf_fpath : str
Path to reV .h5 file containing desired capacity factor profiles
rpm_meta : pandas.DataFrame | str
DataFrame or path to .csv or .json containing the RPM meta data:
- Categorical regions of interest with column label "region"
- # of clusters per region with column label "clusters"
- A column that maps the RPM regions to the cf_fpath meta data:
"res_gid" (priorized) or "gen_gid". This can be omitted if the
rpm_region_col kwarg input is found in the cf_fpath meta
rpm_region_col : str | Nonetype
If not None, the meta-data field to map RPM regions to
max_workers : int, optional
Number of parallel workers. 1 will run serial, None will use all
available., by default None
"""
log_versions(logger)
if rpm_region_col is not None:
logger.info('Initializing RPM clustering on regional column "{}".'
.format(rpm_region_col))
self._cf_h5 = cf_fpath
self._rpm_regions = self._map_rpm_regions(rpm_meta,
region_col=rpm_region_col)
if max_workers is None:
max_workers = os.cpu_count()
self.max_workers = max_workers
@staticmethod
def _parse_rpm_meta(rpm_meta):
"""
Extract rpm meta and map it to the cf profile data
Parameters
----------
rpm_meta : pandas.DataFrame | str
DataFrame or path to .csv or .json containing the RPM meta data:
- Categorical regions of interest with column label "region"
- # of clusters per region with column label "clusters"
- A column that maps the RPM regions to the cf_fpath meta data:
"res_gid" (priorized) or "gen_gid". This can be omitted if the
rpm_region_col kwarg input is found in the cf_fpath meta
Returns
-------
rpm_meta : pandas.DataFrame
DataFrame of RPM regional meta data (clusters and cf/resource GIDs)
"""
if isinstance(rpm_meta, str):
if rpm_meta.endswith('.csv'):
                rpm_meta = pd.read_csv(rpm_meta)
import copy
import datetime
import inspect
import itertools
import json
import logging
import traceback
import warnings
from collections import defaultdict, namedtuple
from collections.abc import Hashable
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
from dateutil.parser import parse
from tqdm.auto import tqdm
from great_expectations import __version__ as ge_version
from great_expectations.core.batch import Batch, BatchDefinition, BatchMarkers
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_suite import (
ExpectationSuite,
expectationSuiteSchema,
)
from great_expectations.core.expectation_validation_result import (
ExpectationSuiteValidationResult,
ExpectationValidationResult,
)
from great_expectations.core.id_dict import BatchSpec
from great_expectations.core.run_identifier import RunIdentifier
from great_expectations.data_asset.util import recursively_convert_to_json_serializable
from great_expectations.dataset import PandasDataset, SparkDFDataset, SqlAlchemyDataset
from great_expectations.dataset.sqlalchemy_dataset import SqlAlchemyBatchReference
from great_expectations.exceptions import (
GreatExpectationsError,
InvalidExpectationConfigurationError,
MetricResolutionError,
)
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
SqlAlchemyExecutionEngine,
)
from great_expectations.execution_engine.pandas_batch_data import PandasBatchData
from great_expectations.expectations.registry import (
get_expectation_impl,
get_metric_provider,
list_registered_expectation_implementations,
)
from great_expectations.marshmallow__shade import ValidationError
from great_expectations.types import ClassConfig
from great_expectations.util import load_class, verify_dynamic_loading_support
from great_expectations.validator.exception_info import ExceptionInfo
from great_expectations.validator.metric_configuration import MetricConfiguration
from great_expectations.validator.validation_graph import (
ExpectationValidationGraph,
MetricEdge,
ValidationGraph,
)
logger = logging.getLogger(__name__)
logging.captureWarnings(True)
try:
import pandas as pd
except ImportError:
pd = None
logger.debug(
"Unable to load pandas; install optional pandas dependency for support."
)
MAX_METRIC_COMPUTATION_RETRIES = 3
ValidationStatistics = namedtuple(
"ValidationStatistics",
[
"evaluated_expectations",
"successful_expectations",
"unsuccessful_expectations",
"success_percent",
"success",
],
)
def _calc_validation_statistics(
validation_results: List[ExpectationValidationResult],
) -> ValidationStatistics:
"""
Calculate summary statistics for the validation results and
return ``ExpectationStatistics``.
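    For example, 9 successful results out of 10 evaluated yield success_percent=90.0 and success=False.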
"""
# calc stats
successful_expectations = sum(exp.success for exp in validation_results)
evaluated_expectations = len(validation_results)
unsuccessful_expectations = evaluated_expectations - successful_expectations
success = successful_expectations == evaluated_expectations
try:
success_percent = successful_expectations / evaluated_expectations * 100
except ZeroDivisionError:
# success_percent = float("nan")
success_percent = None
return ValidationStatistics(
successful_expectations=successful_expectations,
evaluated_expectations=evaluated_expectations,
unsuccessful_expectations=unsuccessful_expectations,
success=success,
success_percent=success_percent,
)
class Validator:
DEFAULT_RUNTIME_CONFIGURATION = {
"include_config": True,
"catch_exceptions": False,
"result_format": "BASIC",
}
RUNTIME_KEYS = DEFAULT_RUNTIME_CONFIGURATION.keys()
# noinspection PyUnusedLocal
def __init__(
self,
execution_engine: ExecutionEngine,
interactive_evaluation: bool = True,
expectation_suite: Optional[ExpectationSuite] = None,
expectation_suite_name: Optional[str] = None,
data_context: Optional[
Any
] = None, # Cannot type DataContext due to circular import
batches: Optional[List[Batch]] = None,
**kwargs,
):
"""
Validator is the key object used to create Expectations, validate Expectations,
and get Metrics for Expectations.
Additionally, note that Validators are used by Checkpoints under-the-hood.
:param execution_engine (ExecutionEngine):
:param interactive_evaluation (bool):
:param expectation_suite (Optional[ExpectationSuite]):
:param expectation_suite_name (Optional[str]):
:param data_context (Optional[DataContext]):
:param batches (Optional[List[Batch]]):
"""
self._data_context = data_context
self._execution_engine = execution_engine
self._expose_dataframe_methods = False
self._validator_config = {}
if batches is None:
batches = []
self._batches = {}
self._active_batch_id = None
self.load_batch_list(batches)
if len(batches) > 1:
logger.debug(
f"{len(batches)} batches will be added to this Validator. The batch_identifiers for the active "
f"batch are {self.active_batch.batch_definition['batch_identifiers'].items()}"
)
self.interactive_evaluation = interactive_evaluation
self._initialize_expectations(
expectation_suite=expectation_suite,
expectation_suite_name=expectation_suite_name,
)
self._default_expectation_args = copy.deepcopy(
Validator.DEFAULT_RUNTIME_CONFIGURATION
)
self._validator_config = {}
# This special state variable tracks whether a validation run is going on, which will disable
# saving expectation config objects
self._active_validation = False
if self._data_context and hasattr(
self._data_context, "_expectation_explorer_manager"
):
# TODO: verify flow of default expectation arguments
self.set_default_expectation_argument("include_config", True)
def __dir__(self):
"""
This custom magic method is used to enable expectation tab completion on Validator objects.
It also allows users to call Pandas.DataFrame methods on Validator objects
"""
validator_attrs = set(super().__dir__())
class_expectation_impls = set(list_registered_expectation_implementations())
# execution_engine_expectation_impls = (
# {
# attr_name
# for attr_name in self.execution_engine.__dir__()
# if attr_name.startswith("expect_")
# }
# if self.execution_engine
# else set()
# )
combined_dir = (
validator_attrs
| class_expectation_impls
# | execution_engine_expectation_impls
)
if self._expose_dataframe_methods:
            combined_dir |= set(dir(pd.DataFrame))
return list(combined_dir)
@property
def expose_dataframe_methods(self) -> bool:
return self._expose_dataframe_methods
@expose_dataframe_methods.setter
def expose_dataframe_methods(self, value: bool) -> None:
self._expose_dataframe_methods = value
def __getattr__(self, name):
name = name.lower()
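        # Names starting with "expect_" resolve to registered expectation implementations; other names are
        # proxied to the active batch's pandas DataFrame when expose_dataframe_methods is enabled.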
if name.startswith("expect_") and get_expectation_impl(name):
return self.validate_expectation(name)
elif (
self._expose_dataframe_methods
and isinstance(self.active_batch.data, PandasBatchData)
and hasattr(pd.DataFrame, name)
):
return getattr(self.active_batch.data.dataframe, name)
else:
raise AttributeError(
f"'{type(self).__name__}' object has no attribute '{name}'"
)
def validate_expectation(self, name: str):
"""
Given the name of an Expectation, obtains the Class-first Expectation implementation and utilizes the
expectation's validate method to obtain a validation result. Also adds in the runtime configuration
Args:
name (str): The name of the Expectation being validated
Returns:
The Expectation's validation result
"""
def inst_expectation(*args, **kwargs):
# this is used so that exceptions are caught appropriately when they occur in expectation config
basic_default_expectation_args = {
k: v
for k, v in self.default_expectation_args.items()
if k in Validator.RUNTIME_KEYS
}
basic_runtime_configuration = copy.deepcopy(basic_default_expectation_args)
basic_runtime_configuration.update(
{k: v for k, v in kwargs.items() if k in Validator.RUNTIME_KEYS}
)
expectation_impl = get_expectation_impl(name)
allowed_config_keys = expectation_impl.get_allowed_config_keys()
expectation_kwargs = recursively_convert_to_json_serializable(kwargs)
meta = None
# This section uses Expectation class' legacy_method_parameters attribute to maintain support for passing
# positional arguments to expectation methods
legacy_arg_names = expectation_impl.legacy_method_parameters.get(
name, tuple()
)
for idx, arg in enumerate(args):
try:
arg_name = legacy_arg_names[idx]
if arg_name in allowed_config_keys:
expectation_kwargs[arg_name] = arg
if arg_name == "meta":
meta = arg
except IndexError:
raise InvalidExpectationConfigurationError(
f"Invalid positional argument: {arg}"
)
configuration = ExpectationConfiguration(
expectation_type=name, kwargs=expectation_kwargs, meta=meta
)
configuration.process_evaluation_parameters(
self._expectation_suite.evaluation_parameters, True, self._data_context
)
exception_info: ExceptionInfo
try:
expectation = expectation_impl(configuration)
"""Given an implementation and a configuration for any Expectation, returns its validation result"""
if not self.interactive_evaluation and not self._active_validation:
validation_result = ExpectationValidationResult(
expectation_config=copy.deepcopy(expectation.configuration)
)
else:
validation_result = expectation.validate(
validator=self,
evaluation_parameters=self._expectation_suite.evaluation_parameters,
data_context=self._data_context,
runtime_configuration=basic_runtime_configuration,
)
                # If validate has set active_validation to true, then we do not save the config to avoid
                # saving updated expectation configs to the same suite during validation runs
if self._active_validation is True:
stored_config = configuration.get_raw_configuration()
else:
# Append the expectation to the config.
stored_config = self._expectation_suite._add_expectation(
expectation_configuration=configuration.get_raw_configuration(),
send_usage_event=False,
)
# If there was no interactive evaluation, success will not have been computed.
if validation_result.success is not None:
# Add a "success" object to the config
stored_config.success_on_last_run = validation_result.success
if self._data_context is not None:
validation_result = self._data_context.update_return_obj(
self, validation_result
)
except Exception as err:
if basic_runtime_configuration.get("catch_exceptions"):
exception_traceback = traceback.format_exc()
exception_message = f"{type(err).__name__}: {str(err)}"
exception_info = ExceptionInfo(
**{
"exception_traceback": exception_traceback,
"exception_message": exception_message,
}
)
validation_result = ExpectationValidationResult(
success=False,
exception_info=exception_info,
expectation_config=configuration,
)
else:
raise err
return validation_result
inst_expectation.__name__ = name
return inst_expectation
@property
def execution_engine(self) -> ExecutionEngine:
"""Returns the execution engine being used by the validator at the given time"""
return self._execution_engine
def list_available_expectation_types(self) -> List[str]:
"""Returns a list of all expectations available to the validator"""
keys = dir(self)
return [
expectation for expectation in keys if expectation.startswith("expect_")
]
def get_metrics(self, metrics: Dict[str, MetricConfiguration]) -> Dict[str, Any]:
"""Return a dictionary with the requested metrics"""
graph: ValidationGraph = ValidationGraph()
for metric_name, metric_configuration in metrics.items():
provider_cls, _ = get_metric_provider(
metric_configuration.metric_name, self.execution_engine
)
for key in provider_cls.domain_keys:
if (
key not in metric_configuration.metric_domain_kwargs
and key in provider_cls.default_kwarg_values
):
metric_configuration.metric_domain_kwargs[
key
] = provider_cls.default_kwarg_values[key]
for key in provider_cls.value_keys:
if (
key not in metric_configuration.metric_value_kwargs
and key in provider_cls.default_kwarg_values
):
metric_configuration.metric_value_kwargs[
key
] = provider_cls.default_kwarg_values[key]
self.build_metric_dependency_graph(
graph=graph,
execution_engine=self._execution_engine,
metric_configuration=metric_configuration,
)
resolved_metrics: Dict[Tuple[str, str, str], Any] = {}
# noinspection PyUnusedLocal
aborted_metrics_info: Dict[
Tuple[str, str, str],
Dict[str, Union[MetricConfiguration, List[Exception], int]],
] = self.resolve_validation_graph(
graph=graph,
metrics=resolved_metrics,
)
return {
metric_name: resolved_metrics[metric_configuration.id]
for (metric_name, metric_configuration) in metrics.items()
}
def get_metric(self, metric: MetricConfiguration) -> Any:
"""return the value of the requested metric."""
return self.get_metrics({"_": metric})["_"]
def graph_validate(
self,
configurations: List[ExpectationConfiguration],
metrics: Optional[Dict[Tuple[str, str, str], Any]] = None,
runtime_configuration: Optional[dict] = None,
) -> List[ExpectationValidationResult]:
"""Obtains validation dependencies for each metric using the implementation of their associated expectation,
then proceeds to add these dependencies to the validation graph, supply readily available metric implementations
to fulfill current metric requirements, and validate these metrics.
Args:
configurations(List[ExpectationConfiguration]): A list of needed Expectation Configurations that
will be used to supply domain and values for metrics.
metrics (dict): A list of currently registered metrics in the registry
runtime_configuration (dict): A dictionary of runtime keyword arguments, controlling semantics
such as the result_format.
Returns:
A list of Validations, validating that all necessary metrics are available.
"""
if runtime_configuration is None:
runtime_configuration = {}
if runtime_configuration.get("catch_exceptions", True):
catch_exceptions = True
else:
catch_exceptions = False
# While evaluating expectation configurations, create sub-graph for every metric dependency and incorporate
# these sub-graphs under corresponding expectation-level sub-graph (state of ExpectationValidationGraph object).
graph: ValidationGraph
expectation_validation_graphs: List[ExpectationValidationGraph] = []
exception_info: ExceptionInfo
processed_configurations = []
# noinspection SpellCheckingInspection
evrs = []
for configuration in configurations:
# Validating
try:
assert (
configuration.expectation_type is not None
), "Given configuration should include expectation type"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
evaluated_config = copy.deepcopy(configuration)
evaluated_config.kwargs.update({"batch_id": self.active_batch_id})
expectation_impl = get_expectation_impl(evaluated_config.expectation_type)
validation_dependencies = expectation_impl().get_validation_dependencies(
evaluated_config, self._execution_engine, runtime_configuration
)["metrics"]
try:
expectation_validation_graph: ExpectationValidationGraph = (
ExpectationValidationGraph(configuration=evaluated_config)
)
for metric_configuration in validation_dependencies.values():
graph = ValidationGraph()
self.build_metric_dependency_graph(
graph=graph,
execution_engine=self._execution_engine,
metric_configuration=metric_configuration,
configuration=evaluated_config,
runtime_configuration=runtime_configuration,
)
expectation_validation_graph.update(graph=graph)
expectation_validation_graphs.append(expectation_validation_graph)
processed_configurations.append(evaluated_config)
except Exception as err:
if catch_exceptions:
exception_traceback = traceback.format_exc()
exception_message = str(err)
exception_info = ExceptionInfo(
**{
"exception_traceback": exception_traceback,
"exception_message": exception_message,
}
)
result = ExpectationValidationResult(
success=False,
exception_info=exception_info,
expectation_config=evaluated_config,
)
evrs.append(result)
else:
raise err
# Collect edges from all expectation-level sub-graphs and incorporate them under common suite-level graph.
expectation_validation_graph: ExpectationValidationGraph
edges: List[MetricEdge] = list(
itertools.chain.from_iterable(
[
expectation_validation_graph.graph.edges
for expectation_validation_graph in expectation_validation_graphs
]
)
)
graph = ValidationGraph(edges=edges)
if metrics is None:
metrics = {}
try:
# Resolve overall suite-level graph and process any MetricResolutionError type exceptions that might occur.
aborted_metrics_info: Dict[
Tuple[str, str, str],
Dict[str, Union[MetricConfiguration, Set[ExceptionInfo], int]],
] = self.resolve_validation_graph(
graph=graph,
metrics=metrics,
runtime_configuration=runtime_configuration,
)
# Trace MetricResolutionError occurrences to expectations relying on corresponding malfunctioning metrics.
rejected_configurations: List[ExpectationConfiguration] = []
for expectation_validation_graph in expectation_validation_graphs:
metric_exception_info: Set[
ExceptionInfo
] = expectation_validation_graph.get_exception_info(
metric_info=aborted_metrics_info
)
# Report all MetricResolutionError occurrences impacting expectation and append it to rejected list.
if len(metric_exception_info) > 0:
configuration = expectation_validation_graph.configuration
for exception_info in metric_exception_info:
result = ExpectationValidationResult(
success=False,
exception_info=exception_info,
expectation_config=configuration,
)
evrs.append(result)
if configuration not in rejected_configurations:
rejected_configurations.append(configuration)
# Exclude all rejected expectations from list of expectations cleared for validation.
for configuration in rejected_configurations:
processed_configurations.remove(configuration)
except Exception as err:
# If a general Exception occurs during the execution of "Validator.resolve_validation_graph()", then all
# expectations in the suite are impacted, because it is impossible to attribute the failure to a metric.
if catch_exceptions:
exception_traceback = traceback.format_exc()
exception_message = str(err)
exception_info = ExceptionInfo(
**{
"exception_traceback": exception_traceback,
"exception_message": exception_message,
}
)
for configuration in processed_configurations:
result = ExpectationValidationResult(
success=False,
exception_info=exception_info,
expectation_config=configuration,
)
evrs.append(result)
return evrs
else:
raise err
for configuration in processed_configurations:
try:
result = configuration.metrics_validate(
metrics,
execution_engine=self._execution_engine,
runtime_configuration=runtime_configuration,
)
evrs.append(result)
except Exception as err:
if catch_exceptions:
exception_traceback = traceback.format_exc()
exception_message = str(err)
exception_info = ExceptionInfo(
**{
"exception_traceback": exception_traceback,
"exception_message": exception_message,
}
)
result = ExpectationValidationResult(
success=False,
exception_info=exception_info,
expectation_config=configuration,
)
evrs.append(result)
else:
raise err
return evrs
def build_metric_dependency_graph(
self,
graph: ValidationGraph,
execution_engine: ExecutionEngine,
metric_configuration: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
runtime_configuration: Optional[dict] = None,
):
"""Obtain domain and value keys for metrics and proceeds to add these metrics to the validation graph
until all metrics have been added."""
metric_impl = get_metric_provider(
metric_configuration.metric_name, execution_engine=execution_engine
)[0]
metric_dependencies = metric_impl.get_evaluation_dependencies(
metric=metric_configuration,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if len(metric_dependencies) == 0:
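            # A metric with no dependencies is a leaf of the validation graph, recorded as an edge with no child.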
graph.add(
MetricEdge(
left=metric_configuration,
)
)
else:
metric_configuration.metric_dependencies = metric_dependencies
for metric_dependency in metric_dependencies.values():
# TODO: <Alex>In the future, provide a more robust cycle detection mechanism.</Alex>
if metric_dependency.id == metric_configuration.id:
logger.warning(
f"Metric {str(metric_configuration.id)} has created a circular dependency"
)
continue
graph.add(
MetricEdge(
left=metric_configuration,
right=metric_dependency,
)
)
self.build_metric_dependency_graph(
graph=graph,
execution_engine=execution_engine,
metric_configuration=metric_dependency,
configuration=configuration,
runtime_configuration=runtime_configuration,
)
def resolve_validation_graph(
self,
graph: ValidationGraph,
metrics: Dict[Tuple[str, str, str], Any],
runtime_configuration: Optional[dict] = None,
) -> Dict[
Tuple[str, str, str],
Dict[str, Union[MetricConfiguration, Set[ExceptionInfo], int]],
]:
if runtime_configuration is None:
runtime_configuration = {}
if runtime_configuration.get("catch_exceptions", True):
catch_exceptions = True
else:
catch_exceptions = False
failed_metric_info: Dict[
Tuple[str, str, str],
Dict[str, Union[MetricConfiguration, Set[ExceptionInfo], int]],
] = {}
aborted_metrics_info: Dict[
Tuple[str, str, str],
Dict[str, Union[MetricConfiguration, Set[ExceptionInfo], int]],
] = {}
ready_metrics: Set[MetricConfiguration]
needed_metrics: Set[MetricConfiguration]
exception_info: ExceptionInfo
# noinspection SpellCheckingInspection
pbar = None
done: bool = False
while not done:
ready_metrics, needed_metrics = self._parse_validation_graph(
validation_graph=graph, metrics=metrics
)
if pbar is None:
# noinspection PyProtectedMember,SpellCheckingInspection
pbar = tqdm(
total=len(ready_metrics) + len(needed_metrics),
desc="Calculating Metrics",
disable=len(graph.edges) < 3,
)
pbar.update(0)
computable_metrics = set()
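            # Metrics that have already failed MAX_METRIC_COMPUTATION_RETRIES times are marked as aborted
            # rather than retried.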
for metric in ready_metrics:
if (
metric.id in failed_metric_info
and failed_metric_info[metric.id]["num_failures"]
>= MAX_METRIC_COMPUTATION_RETRIES
):
aborted_metrics_info[metric.id] = failed_metric_info[metric.id]
else:
computable_metrics.add(metric)
try:
metrics.update(
self._resolve_metrics(
execution_engine=self._execution_engine,
metrics_to_resolve=computable_metrics,
metrics=metrics,
runtime_configuration=runtime_configuration,
)
)
pbar.update(len(computable_metrics))
except MetricResolutionError as err:
if catch_exceptions:
exception_traceback = traceback.format_exc()
exception_message = str(err)
exception_info = ExceptionInfo(
**{
"exception_traceback": exception_traceback,
"exception_message": exception_message,
}
)
for failed_metric in err.failed_metrics:
if failed_metric.id in failed_metric_info:
failed_metric_info[failed_metric.id]["num_failures"] += 1
failed_metric_info[failed_metric.id]["exception_info"].add(
exception_info
)
else:
failed_metric_info[failed_metric.id] = {}
failed_metric_info[failed_metric.id][
"metric_configuration"
] = failed_metric
failed_metric_info[failed_metric.id]["num_failures"] = 1
failed_metric_info[failed_metric.id]["exception_info"] = {
exception_info
}
else:
raise err
except Exception as e:
if catch_exceptions:
logger.error(
f"""Caught exception {str(e)} while trying to resolve a set of {len(ready_metrics)} metrics; \
aborting graph resolution.
"""
)
done = True
else:
raise e
if (len(ready_metrics) + len(needed_metrics) == 0) or (
len(ready_metrics) == len(aborted_metrics_info)
):
done = True
pbar.close()
return aborted_metrics_info
def append_expectation(self, expectation_config: ExpectationConfiguration) -> None:
"""This method is a thin wrapper for ExpectationSuite.append_expectation"""
warnings.warn(
"append_expectation is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.add_expectation instead.",
DeprecationWarning,
)
self._expectation_suite.append_expectation(expectation_config)
def find_expectation_indexes(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
) -> List[int]:
"""This method is a thin wrapper for ExpectationSuite.find_expectation_indexes"""
warnings.warn(
"find_expectation_indexes is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectation_indexes(
expectation_configuration=expectation_configuration, match_type=match_type
)
def find_expectations(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
ge_cloud_id: Optional[str] = None,
) -> List[ExpectationConfiguration]:
"""This method is a thin wrapper for ExpectationSuite.find_expectations()"""
warnings.warn(
"find_expectations is deprecated, and will be removed in a future release. "
+ "Please use ExpectationSuite.find_expectation_indexes instead.",
DeprecationWarning,
)
return self._expectation_suite.find_expectations(
expectation_configuration=expectation_configuration,
match_type=match_type,
ge_cloud_id=ge_cloud_id,
)
def remove_expectation(
self,
expectation_configuration: ExpectationConfiguration,
match_type: str = "domain",
remove_multiple_matches: bool = False,
ge_cloud_id: Optional[str] = None,
) -> List[ExpectationConfiguration]:
return self._expectation_suite.remove_expectation(
expectation_configuration=expectation_configuration,
match_type=match_type,
remove_multiple_matches=remove_multiple_matches,
ge_cloud_id=ge_cloud_id,
)
def set_config_value(self, key, value) -> None:
"""Setter for config value"""
self._validator_config[key] = value
def get_config_value(self, key):
"""Getter for config value"""
return self._validator_config.get(key)
def load_batch_list(self, batch_list: List[Batch]) -> List[Batch]:
for batch in batch_list:
try:
assert isinstance(
batch, Batch
), "batches provided to Validator must be Great Expectations Batch objects"
except AssertionError as e:
logger.warning(str(e))
self._execution_engine.load_batch_data(batch.id, batch.data)
self._batches[batch.id] = batch
# We set the active_batch_id in each iteration of the loop to keep in sync with the active_batch_id for the
# execution_engine. The final active_batch_id will be that of the final batch loaded.
self.active_batch_id = batch.id
return batch_list
@property
def batches(self) -> Dict[str, Batch]:
"""Getter for batches"""
return self._batches
@property
def loaded_batch_ids(self) -> List[str]:
return self.execution_engine.loaded_batch_data_ids
@property
def active_batch(self) -> Optional[Batch]:
"""Getter for active batch"""
active_batch_id: Optional[str] = self.active_batch_id
batch: Optional[Batch] = (
self.batches.get(active_batch_id) if active_batch_id else None
)
return batch
@property
def active_batch_spec(self) -> Optional[BatchSpec]:
"""Getter for active batch's batch_spec"""
if not self.active_batch:
return None
else:
return self.active_batch.batch_spec
@property
def active_batch_id(self) -> Optional[str]:
"""Getter for active batch id"""
active_engine_batch_id = self._execution_engine.active_batch_data_id
if active_engine_batch_id != self._active_batch_id:
logger.debug(
"This validator has a different active batch id than its Execution Engine."
)
return self._active_batch_id
@active_batch_id.setter
def active_batch_id(self, batch_id: str) -> None:
assert set(self.batches.keys()).issubset(set(self.loaded_batch_ids))
available_batch_ids: Set[str] = set(self.batches.keys()).union(
set(self.loaded_batch_ids)
)
if batch_id not in available_batch_ids:
raise ValueError(
f"""batch_id {batch_id} not found in loaded batches. Batches must first be loaded before they can be \
set as active.
"""
)
else:
self._active_batch_id = batch_id
@property
def active_batch_markers(self) -> Optional[BatchMarkers]:
"""Getter for active batch's batch markers"""
if not self.active_batch:
return None
else:
return self.active_batch.batch_markers
@property
def active_batch_definition(self) -> Optional[BatchDefinition]:
"""Getter for the active batch's batch definition"""
if not self.active_batch:
return None
else:
return self.active_batch.batch_definition
def discard_failing_expectations(self) -> None:
"""Removes any expectations from the validator where the validation has failed"""
res = self.validate(only_return_failures=True).results
if any(res):
for item in res:
self.remove_expectation(
expectation_configuration=item.expectation_config,
match_type="runtime",
)
warnings.warn("Removed %s expectations that were 'False'" % len(res))
def get_default_expectation_arguments(self) -> dict:
"""Fetch default expectation arguments for this data_asset
Returns:
A dictionary containing all the current default expectation arguments for a data_asset
Ex::
{
"include_config" : True,
"catch_exceptions" : False,
"result_format" : 'BASIC'
}
See also:
set_default_expectation_arguments
"""
return self.default_expectation_args
@property
def ge_cloud_mode(self) -> bool:
"""
Wrapper around ge_cloud_mode property of associated Data Context
"""
if self._data_context:
return self._data_context.ge_cloud_mode
return False
@property
def default_expectation_args(self) -> dict:
"""A getter for default Expectation arguments"""
return self._default_expectation_args
def set_default_expectation_argument(self, argument: str, value) -> None:
"""
Set a default expectation argument for this data_asset
Args:
argument (string): The argument to be replaced
value : The New argument to use for replacement
Returns:
None
See also:
get_default_expectation_arguments
"""
self._default_expectation_args[argument] = value
def get_expectations_config(
self,
discard_failed_expectations: bool = True,
discard_result_format_kwargs: bool = True,
discard_include_config_kwargs: bool = True,
discard_catch_exceptions_kwargs: bool = True,
suppress_warnings: bool = False,
) -> ExpectationSuite:
"""
        Returns an expectation configuration, providing options to discard failed expectations and to
        discard or include different result aspects, such as exceptions and result format.
"""
warnings.warn(
"get_expectations_config is deprecated, and will be removed in a future release. "
+ "Please use get_expectation_suite instead.",
DeprecationWarning,
)
return self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
def get_expectation_suite(
self,
discard_failed_expectations: bool = True,
discard_result_format_kwargs: bool = True,
discard_include_config_kwargs: bool = True,
discard_catch_exceptions_kwargs: bool = True,
suppress_warnings: bool = False,
suppress_logging: bool = False,
) -> ExpectationSuite:
"""Returns _expectation_config as a JSON object, and perform some cleaning along the way.
Args:
discard_failed_expectations (boolean): \
Only include expectations with success_on_last_run=True in the exported config. Defaults to `True`.
discard_result_format_kwargs (boolean): \
In returned expectation objects, suppress the `result_format` parameter. Defaults to `True`.
discard_include_config_kwargs (boolean): \
In returned expectation objects, suppress the `include_config` parameter. Defaults to `True`.
discard_catch_exceptions_kwargs (boolean): \
In returned expectation objects, suppress the `catch_exceptions` parameter. Defaults to `True`.
suppress_warnings (boolean): \
If true, do not include warnings in logging information about the operation.
suppress_logging (boolean): \
If true, do not create a log entry (useful when using get_expectation_suite programmatically)
Returns:
An expectation suite.
Note:
get_expectation_suite does not affect the underlying expectation suite at all. The returned suite is a \
copy of _expectation_suite, not the original object.
"""
expectation_suite = copy.deepcopy(self._expectation_suite)
expectations = expectation_suite.expectations
discards = defaultdict(int)
if discard_failed_expectations:
new_expectations = []
for expectation in expectations:
# Note: This is conservative logic.
                # Instead of retaining expectations IFF success==True, it discards expectations IFF success==False.
# In cases where expectation.success is missing or None, expectations are *retained*.
# Such a case could occur if expectations were loaded from a config file and never run.
if expectation.success_on_last_run is False:
discards["failed_expectations"] += 1
else:
new_expectations.append(expectation)
expectations = new_expectations
message = "\t%d expectation(s) included in expectation_suite." % len(
expectations
)
if discards["failed_expectations"] > 0 and not suppress_warnings:
message += (
" Omitting %d expectation(s) that failed when last run; set "
"discard_failed_expectations=False to include them."
% discards["failed_expectations"]
)
for expectation in expectations:
# FIXME: Factor this out into a new function. The logic is duplicated in remove_expectation,
# which calls _copy_and_clean_up_expectation
expectation.success_on_last_run = None
if discard_result_format_kwargs:
if "result_format" in expectation.kwargs:
del expectation.kwargs["result_format"]
discards["result_format"] += 1
if discard_include_config_kwargs:
if "include_config" in expectation.kwargs:
del expectation.kwargs["include_config"]
discards["include_config"] += 1
if discard_catch_exceptions_kwargs:
if "catch_exceptions" in expectation.kwargs:
del expectation.kwargs["catch_exceptions"]
discards["catch_exceptions"] += 1
settings_message = ""
if discards["result_format"] > 0 and not suppress_warnings:
settings_message += " result_format"
if discards["include_config"] > 0 and not suppress_warnings:
settings_message += " include_config"
if discards["catch_exceptions"] > 0 and not suppress_warnings:
settings_message += " catch_exceptions"
if (
len(settings_message) > 1
): # Only add this if we added one of the settings above.
settings_message += " settings filtered."
expectation_suite.expectations = expectations
if not suppress_logging:
logger.info(message + settings_message)
return expectation_suite
def save_expectation_suite(
self,
filepath: Optional[str] = None,
discard_failed_expectations: bool = True,
discard_result_format_kwargs: bool = True,
discard_include_config_kwargs: bool = True,
discard_catch_exceptions_kwargs: bool = True,
suppress_warnings: bool = False,
) -> None:
"""Writes ``_expectation_config`` to a JSON file.
Writes the DataAsset's expectation config to the specified JSON ``filepath``. Failing expectations \
can be excluded from the JSON expectations config with ``discard_failed_expectations``. The kwarg key-value \
pairs :ref:`result_format`, :ref:`include_config`, and :ref:`catch_exceptions` are optionally excluded from \
the JSON expectations config.
Args:
filepath (string): \
The location and name to write the JSON config file to.
discard_failed_expectations (boolean): \
If True, excludes expectations that do not return ``success = True``. \
If False, all expectations are written to the JSON config file.
discard_result_format_kwargs (boolean): \
If True, the :ref:`result_format` attribute for each expectation is not written to the JSON config \
file.
discard_include_config_kwargs (boolean): \
If True, the :ref:`include_config` attribute for each expectation is not written to the JSON config \
file.
discard_catch_exceptions_kwargs (boolean): \
If True, the :ref:`catch_exceptions` attribute for each expectation is not written to the JSON \
config file.
suppress_warnings (boolean): \
If True, all warnings raised by Great Expectations, as a result of dropped expectations, are \
suppressed.
"""
expectation_suite = self.get_expectation_suite(
discard_failed_expectations,
discard_result_format_kwargs,
discard_include_config_kwargs,
discard_catch_exceptions_kwargs,
suppress_warnings,
)
if filepath is None and self._data_context is not None:
self._data_context.save_expectation_suite(expectation_suite)
if self.ge_cloud_mode:
updated_suite = self._data_context.get_expectation_suite(
ge_cloud_id=str(expectation_suite.ge_cloud_id)
)
self._initialize_expectations(expectation_suite=updated_suite)
elif filepath is not None:
with open(filepath, "w") as outfile:
json.dump(
expectationSuiteSchema.dump(expectation_suite),
outfile,
indent=2,
sort_keys=True,
)
else:
raise ValueError(
"Unable to save config: filepath or data_context must be available."
)
# TODO: <Alex>Should "include_config" also be an argument of this method?</Alex>
def validate(
self,
expectation_suite=None,
run_id=None,
data_context: Optional[
Any
] = None, # Cannot type DataContext due to circular import
evaluation_parameters: Optional[dict] = None,
catch_exceptions: bool = True,
result_format: Optional[str] = None,
only_return_failures: bool = False,
run_name: Optional[str] = None,
run_time: Optional[str] = None,
) -> Union[ExpectationValidationResult, ExpectationSuiteValidationResult]:
# noinspection SpellCheckingInspection
"""Generates a JSON-formatted report describing the outcome of all expectations.
Use the default expectation_suite=None to validate the expectations config associated with the DataAsset.
Args:
expectation_suite (json or None): \
If None, uses the expectations config generated with the DataAsset during the current session. \
If a JSON file, validates those expectations.
run_id (str): \
Used to identify this validation result as part of a collection of validations. \
See DataContext for more information.
run_name (str): \
Used to identify this validation result as part of a collection of validations. \
See DataContext for more information.
run_time (str): \
Used to identify this validation result as part of a collection of validations. \
See DataContext for more information.
data_context (DataContext): \
A datacontext object to use as part of validation for binding evaluation parameters and \
registering validation results.
evaluation_parameters (dict or None): \
                If None, uses the evaluation_parameters from the expectation_suite provided or as part of the \
data_asset. If a dict, uses the evaluation parameters in the dictionary.
catch_exceptions (boolean): \
If True, exceptions raised by tests will not end validation and will be described in the returned \
report.
result_format (string or None): \
If None, uses the default value ('BASIC' or as specified). \
If string, the returned expectation output follows the specified format ('BOOLEAN_ONLY','BASIC', \
etc.).
only_return_failures (boolean): \
If True, expectation results are only returned when ``success = False`` \
Returns:
A JSON-formatted dictionary containing a list of the validation results. \
An example of the returned format::
{
"results": [
{
"unexpected_list": [unexpected_value_1, unexpected_value_2],
"expectation_type": "expect_*",
"kwargs": {
"column": "Column_Name",
"output_format": "SUMMARY"
},
"success": true,
"raised_exception: false.
"exception_traceback": null
},
{
... (Second expectation results)
},
... (More expectations results)
],
"success": true,
"statistics": {
"evaluated_expectations": n,
"successful_expectations": m,
"unsuccessful_expectations": n - m,
"success_percent": m / n
}
}
Notes:
            If the configuration object was built with a different version of Great Expectations than the \
            current environment, or if no version was found in the configuration file, a warning may be logged.
Raises:
AttributeError - if 'catch_exceptions'=None and an expectation throws an AttributeError
"""
# noinspection PyUnusedLocal
try:
validation_time = datetime.datetime.now(datetime.timezone.utc).strftime(
"%Y%m%dT%H%M%S.%fZ"
)
assert not (run_id and run_name) and not (
run_id and run_time
), "Please provide either a run_id or run_name and/or run_time."
if isinstance(run_id, str) and not run_name:
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional). Instead of providing a run_id, you may also provide"
"run_name and run_time separately.",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif not isinstance(run_id, RunIdentifier):
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
self._active_validation = True
# If a different validation data context was provided, override
validation_data_context = self._data_context
if data_context is None and self._data_context is not None:
data_context = self._data_context
elif data_context is not None:
# temporarily set self._data_context so it is used inside the expectation decorator
self._data_context = data_context
if expectation_suite is None:
expectation_suite = self.get_expectation_suite(
discard_failed_expectations=False,
discard_result_format_kwargs=False,
discard_include_config_kwargs=False,
discard_catch_exceptions_kwargs=False,
)
elif isinstance(expectation_suite, str):
try:
with open(expectation_suite) as infile:
expectation_suite = expectationSuiteSchema.loads(infile.read())
except ValidationError:
raise
except OSError:
raise GreatExpectationsError(
"Unable to load expectation suite: IO error while reading %s"
% expectation_suite
)
elif not isinstance(expectation_suite, ExpectationSuite):
logger.error(
"Unable to validate using the provided value for expectation suite; does it need to be "
"loaded from a dictionary?"
)
if getattr(data_context, "_usage_statistics_handler", None):
# noinspection PyProtectedMember
handler = data_context._usage_statistics_handler
# noinspection PyProtectedMember
handler.send_usage_message(
event="data_asset.validate",
event_payload=handler._batch_anonymizer.anonymize_batch_info(
self
),
success=False,
)
return ExpectationValidationResult(success=False)
# Evaluation parameter priority is
# 1. from provided parameters
# 2. from expectation configuration
# 3. from data context
# So, we load them in reverse order
if data_context is not None:
runtime_evaluation_parameters = (
data_context.evaluation_parameter_store.get_bind_params(run_id)
)
else:
runtime_evaluation_parameters = {}
if expectation_suite.evaluation_parameters:
runtime_evaluation_parameters.update(
expectation_suite.evaluation_parameters
)
if evaluation_parameters is not None:
runtime_evaluation_parameters.update(evaluation_parameters)
# Convert evaluation parameters to be json-serializable
runtime_evaluation_parameters = recursively_convert_to_json_serializable(
runtime_evaluation_parameters
)
# Warn if our version is different from the version in the configuration
# TODO: Deprecate "great_expectations.__version__"
# noinspection PyUnusedLocal
suite_ge_version = expectation_suite.meta.get(
"great_expectations_version"
) or expectation_suite.meta.get("great_expectations.__version__")
# Group expectations by column
columns = {}
for expectation in expectation_suite.expectations:
expectation.process_evaluation_parameters(
evaluation_parameters=runtime_evaluation_parameters,
interactive_evaluation=self.interactive_evaluation,
data_context=self._data_context,
)
if "column" in expectation.kwargs and isinstance(
expectation.kwargs["column"], Hashable
):
column = expectation.kwargs["column"]
else:
# noinspection SpellCheckingInspection
column = "_nocolumn"
if column not in columns:
columns[column] = []
columns[column].append(expectation)
expectations_to_evaluate = []
for col in columns:
expectations_to_evaluate.extend(columns[col])
runtime_configuration = self._get_runtime_configuration(
catch_exceptions=catch_exceptions, result_format=result_format
)
results = self.graph_validate(
configurations=expectations_to_evaluate,
runtime_configuration=runtime_configuration,
)
statistics = _calc_validation_statistics(results)
if only_return_failures:
abbrev_results = []
for exp in results:
if not exp.success:
abbrev_results.append(exp)
results = abbrev_results
expectation_suite_name = expectation_suite.expectation_suite_name
result = ExpectationSuiteValidationResult(
results=results,
success=statistics.success,
statistics={
"evaluated_expectations": statistics.evaluated_expectations,
"successful_expectations": statistics.successful_expectations,
"unsuccessful_expectations": statistics.unsuccessful_expectations,
"success_percent": statistics.success_percent,
},
evaluation_parameters=runtime_evaluation_parameters,
meta={
"great_expectations_version": ge_version,
"expectation_suite_name": expectation_suite_name,
"run_id": run_id,
"batch_spec": self.active_batch_spec,
"batch_markers": self.active_batch_markers,
"active_batch_definition": self.active_batch_definition,
"validation_time": validation_time,
},
)
self._data_context = validation_data_context
except Exception as e:
if getattr(data_context, "_usage_statistics_handler", None):
# noinspection PyProtectedMember
handler = data_context._usage_statistics_handler
# noinspection PyProtectedMember
handler.send_usage_message(
event="data_asset.validate",
event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
success=False,
)
raise
finally:
self._active_validation = False
if getattr(data_context, "_usage_statistics_handler", None):
# noinspection PyProtectedMember
handler = data_context._usage_statistics_handler
# noinspection PyProtectedMember
handler.send_usage_message(
event="data_asset.validate",
event_payload=handler._batch_anonymizer.anonymize_batch_info(self),
success=True,
)
return result
def get_evaluation_parameter(self, parameter_name, default_value=None):
"""
Get an evaluation parameter value that has been stored in meta.
Args:
parameter_name (string): The name of the parameter to retrieve.
default_value (any): The default value to be returned if the parameter is not found.
Returns:
The current value of the evaluation parameter.
"""
if parameter_name in self._expectation_suite.evaluation_parameters:
return self._expectation_suite.evaluation_parameters[parameter_name]
else:
return default_value
def set_evaluation_parameter(self, parameter_name, parameter_value):
"""
Provide a value to be stored in the data_asset evaluation_parameters object and used to evaluate
parameterized expectations.
Args:
parameter_name (string): The name of the kwarg to be replaced at evaluation time
parameter_value (any): The value to be used
"""
self._expectation_suite.evaluation_parameters.update(
{parameter_name: parameter_value}
)
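    # Minimal usage sketch (the validator instance and parameter names are hypothetical):
    #   validator.set_evaluation_parameter("upstream_row_count", 10000)
    #   validator.get_evaluation_parameter("upstream_row_count")   # -> 10000
    #   validator.get_evaluation_parameter("missing_param", 0)     # -> 0 (default)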
def add_citation(
self,
comment: str,
batch_spec: Optional[dict] = None,
batch_markers: Optional[dict] = None,
batch_definition: Optional[dict] = None,
citation_date: Optional[str] = None,
) -> None:
"""Adds a citation to an existing Expectation Suite within the validator"""
if batch_spec is None:
batch_spec = self.batch_spec
if batch_markers is None:
batch_markers = self.active_batch_markers
if batch_definition is None:
batch_definition = self.active_batch_definition
self._expectation_suite.add_citation(
comment,
batch_spec=batch_spec,
batch_markers=batch_markers,
batch_definition=batch_definition,
citation_date=citation_date,
)
@property
def expectation_suite_name(self) -> str:
"""Gets the current expectation_suite name of this data_asset as stored in the expectations configuration."""
return self._expectation_suite.expectation_suite_name
@expectation_suite_name.setter
def expectation_suite_name(self, expectation_suite_name: str) -> None:
"""Sets the expectation_suite name of this data_asset as stored in the expectations configuration."""
self._expectation_suite.expectation_suite_name = expectation_suite_name
def test_expectation_function(
self, function: Callable, *args, **kwargs
) -> Callable:
"""Test a generic expectation function
Args:
function (func): The function to be tested. (Must be a valid expectation function.)
*args : Positional arguments to be passed to the function
**kwargs : Keyword arguments to be passed to the function
Returns:
A JSON-serializable expectation result object.
Notes:
This function is a thin layer to allow quick testing of new expectation functions, without having to \
define custom classes, etc. To use developed expectations from the command-line tool, you will still need \
to define custom classes, etc.
Check out :ref:`how_to_guides__creating_and_editing_expectations__how_to_create_custom_expectations` for
more information.
"""
# noinspection SpellCheckingInspection
argspec = inspect.getfullargspec(function)[0][1:]
new_function = self.expectation(argspec)(function)
return new_function(self, *args, **kwargs)
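    # Hedged example of how this might be called (the expectation function below is made up):
    #   def expect_something(self, column):
    #       return {"success": column is not None}
    #   validator.test_expectation_function(expect_something, column="id")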
def columns(self, domain_kwargs: Optional[Dict[str, Any]] = None) -> List[str]:
if domain_kwargs is None:
domain_kwargs = {
"batch_id": self.execution_engine.active_batch_data_id,
}
columns: List[str] = self.get_metric(
metric=MetricConfiguration(
metric_name="table.columns",
metric_domain_kwargs=domain_kwargs,
)
)
return columns
def head(
self,
n_rows: int = 5,
domain_kwargs: Optional[Dict[str, Any]] = None,
fetch_all: bool = False,
) -> pd.DataFrame:
if domain_kwargs is None:
domain_kwargs = {
"batch_id": self.execution_engine.active_batch_data_id,
}
data: Any = self.get_metric(
metric=MetricConfiguration(
metric_name="table.head",
metric_domain_kwargs=domain_kwargs,
metric_value_kwargs={
"n_rows": n_rows,
"fetch_all": fetch_all,
},
)
)
df: pd.DataFrame
if isinstance(
self.execution_engine, (PandasExecutionEngine, SqlAlchemyExecutionEngine)
):
df = pd.DataFrame(data=data)
elif isinstance(self.execution_engine, SparkDFExecutionEngine):
rows: List[Dict[str, Any]] = [datum.asDict() for datum in data]
df = pd.DataFrame(data=rows)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
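    # `other` is a zero-dimensional ndarray; the comparison unboxes it to the underlying
    # Period scalar, so only the first element of `pi` satisfies <= and the result is
    # [True, False, False, False].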
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
from time import time
from os import path, listdir
from datetime import timedelta
from datetime import date as dt_date
from datetime import datetime as dt
from numpy import cumprod
from pandas import DataFrame, read_sql_query, read_csv, concat
from functions import psqlEngine
class Investments():
def __init__(self, path = '../investments/', name = 'get_investments', **kwargs):
self.kwargs = kwargs
self.path = path
self.hyperparameters()
self.get_engine()
self.get_dollar()
self.get_all_assets()
self.domestic_bond_returns()
self.get_benchmarks()
self.portfolio_domestic_stocks = self.get_quotas('domestic_stocks')
self.portfolio_international_stocks = self.get_quotas('international_stocks')
self.portfolio_crypto = self.get_quotas('crypto')
# self.portfolio_domestic_options = self.get_quotas('domestic_options')
self.portfolio_domestic_funds = self.get_quotas('domestic_funds')
self.get_portfolio()
self.get_aggregate()
self.get_time_series()
self.dispose_engine()
def __call__(self, flag = 'assets'):
if flag == 'dollar':
return self.dollar
if flag == 'bonds':
return self.domestic_bonds, self.interests
if flag == 'stocks':
return self.domestic_tickers, self.international_tickers
if flag == 'crypto':
return self.crypto, self.fractions
if flag == 'portfolio':
return self.portfolio, self.portfolio_aggregate.round(2)
if flag == 'save':
rounded = self.portfolio.round(2)
rounded2 = self.portfolio_aggregate.round(2)
engine = psqlEngine(self.database)
connection = engine.connect()
rounded.to_sql('portfolio', connection, if_exists = 'replace', index = False)
rounded2.to_sql('aggregate', connection, if_exists = 'replace', index = False)
connection.close()
engine.dispose()
if flag == 'time_series':
return self.portfolio_time_series.round(2)
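    # Hedged usage sketch (database config and paths are assumptions, not part of this file):
    #   inv = Investments(path='../investments/')
    #   portfolio, aggregate = inv('portfolio')
    #   inv('save')   # persists both tables via the psql engine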
def hyperparameters(self):
self.database = self.kwargs.get('database', 'database.ini')
self.benchmark_database = self.kwargs.get('benchmarks_database', 'benchmarks')
self.domestic_stocks_database = self.kwargs.get('domestic_database', 'brazil_stocks')
self.domestic_options_database = self.kwargs.get('domestic_database', 'brazil_options')
self.international_database = self.kwargs.get('international_database', 'usa_stocks')
self.currency_database = self.kwargs.get('currency_database', 'currencies')
self.domestic_bonds_path = '{}bonds/'.format(self.path)
self.crypto_path = '{}crypto/'.format(self.path)
self.domestic_stocks_path = '{}stocks/domestic/'.format(self.path)
self.international_stocks_path = '{}stocks/international/'.format(self.path)
self.domestic_options_path = '{}options/domestic/'.format(self.path)
self.domestic_funds_path = '{}funds/domestic/'.format(self.path)
self.list_paths = [
self.domestic_bonds_path,
self.crypto_path,
self.domestic_stocks_path,
self.international_stocks_path,
self.domestic_options_path,
self.domestic_funds_path,
]
self.dates_min = DataFrame()
def get_engine(self):
self.engine = psqlEngine(self.database)
self.connection = self.engine.connect()
def dispose_engine(self):
self.connection.close()
self.engine.dispose()
def get_dollar(self):
currency = 'BRLUSD'
self.dollar = float(read_sql_query("SELECT * FROM {} WHERE ticker = '{}'".format(self.benchmark_database, currency), self.connection).iloc[0].close)
self.dollar_full = read_sql_query("SELECT date, close FROM {} WHERE ticker = '{}' ORDER BY date".format(self.benchmark_database, currency), self.connection)
self.dollar_full.drop_duplicates('date', inplace = True)
self.dollar_full = self.insert_weekends(self.dollar_full)
self.dollar_full.rename(columns = {'close': 'dollar_close'}, inplace = True)
self.dollar_full['dollar_close'] = self.dollar_full.dollar_close.astype('float')
def get_benchmarks(self):
self.spy = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'SPY' ORDER BY date".format(self.benchmark_database), self.connection)
self.bova = read_sql_query("SELECT date, adjusted_close as close FROM {} WHERE ticker = 'BOVA11' ORDER BY date".format(self.benchmark_database), self.connection)
self.spy.drop_duplicates('date', inplace = True)
self.bova.drop_duplicates('date', inplace = True)
self.spy = self.insert_weekends(self.spy)
self.spy['close'] = self.spy.close.astype('float')
self.bova = self.insert_weekends(self.bova)
self.bova = self.bova.merge(self.dollar_full, on = 'date')
self.bova['close'] = self.bova.close.astype('float')
self.bova['close_dollar'] = (self.bova.close * self.bova.dollar_close).to_list()
def get_all_assets(self):
self.interests, self.fractions = list(), list()
self.domestic_tickers, self.international_tickers = list(), list()
self.domestic_options_tickers = list()
self.domestic_funds_tickers = list()
for directory in self.list_paths:
list_files = list()
for filename in listdir(directory):
if filename.endswith('.csv'):
list_files.append(path.join(directory, filename))
if directory == self.domestic_bonds_path:
self.interests.append(filename.replace('.csv', '').upper())
if directory == self.crypto_path:
self.fractions.append(filename.replace('.csv', '').upper())
if directory == self.domestic_stocks_path:
self.domestic_tickers.append(filename.replace('.csv', '').upper())
if directory == self.international_stocks_path:
self.international_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_options_path:
self.domestic_options_tickers.append(filename.replace('.csv', '').upper())
if directory == self.domestic_funds_path:
self.domestic_funds_tickers.append(filename.replace('.csv', '').upper())
dictionary = dict()
if directory == self.domestic_bonds_path:
for filename, interest in zip(list_files, self.interests):
df = read_csv(filename)
#!/usr/bin/env python
# coding: utf-8
# Loading the json with the grade data:
# In[1]:
import json
with open('grades.json', 'rb') as f:
data = json.load(f)
# Extracting the relevant information out of the json for one course:
# In[2]:
build_dict = lambda course: {
'id': course['content']['achievementDto']['cpCourseLibDto']['id'],
'course_name_de': course['content']['achievementDto']['cpCourseLibDto']['courseTitle']['value'],
'course_name_en': course['content']['achievementDto']['cpCourseLibDto']['courseTitle']['translations']['translation'][1]['value'],
'course_number': course['content']['achievementDto']['cpCourseLibDto']['courseNumber']['courseNumber'],
'ects': course['content']['achievementDto']['cpCourseLibDto']['ectsCredits'],
'semester_code': course['content']['achievementDto']['semesterLibDto']['key'],
'semester_name_de': course['content']['achievementDto']['semesterLibDto']['semesterDesignation']
['value'],
'semester_name_en': course['content']['achievementDto']['semesterLibDto']['semesterDesignation']
['translations']['translation'][1]['value'],
'semester_start_date': course['content']['achievementDto']['semesterLibDto']['startOfAcademicSemester']['value'],
'semester_end_date': course['content']['achievementDto']['semesterLibDto']['endOfAcademicSemester']['value'],
'grade_date': course['content']['achievementDto']['achievementDate']['value'],
'grade_name_de': course['content']['achievementDto']['gradeDto']['name']['value'],
'grade_name_en': course['content']['achievementDto']['gradeDto']['name']['translations']['translation'][1]['value'],
'grade': course['content']['achievementDto']['gradeDto']['value'],
}
# Creating a list of dicts, each dict containing the info for one course.
# In[3]:
dicts = [build_dict(course) for course in data['resource']]
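# Illustrative shape of one entry in `dicts` (the values below are invented, not real data):
#   {'id': 12345, 'course_name_de': '...', 'course_name_en': '...', 'course_number': 'IN0001',
#    'ects': 8, 'semester_code': '19W', 'grade_date': '2020-02-20', 'grade': '1.3', ...}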
# For each course, parse the grades out of its html file, and add to its dict:
# In[4]:
from bs4 import BeautifulSoup
possible_grades = ['1.0', '1.3', '1.4', '1.7', '2.0', '2.3', '2.4', '2.7', '3.0', '3.3', '3.4', '3.7', '4.0', '4.3', '4.7', '5.0']
standard_possible_grades = ['1.0', '1.3', '1.7', '2.0', '2.3', '2.7', '3.0', '3.3', '3.7', '4.0', '4.3', '4.7', '5.0']
all_possible_grades = possible_grades + ['did_not_show_up']
for d in reversed(dicts): # iterating in reverse order so we can remove elements while iterating.
# University regulation: written exams from first semester are weighted half the points.
d['grade_weight'] = d['ects']
if ('Discrete Structures' in d['course_name_en']
or 'Introduction to Informatics' in d['course_name_en']
or 'Computer Organization' in d['course_name_en']
):
d['grade_weight'] >>= 1 # divide by 2 but leave as int (known to all be divisible by 2)
# read the html file to a string
try:
with open('stats/{}.html'.format(d['id']), 'rb') as f:
html_doc = f.read()
soup = BeautifulSoup(html_doc, 'html.parser')
# the data can be found in the titles of div objects with the class "kandcountbox"
divs = soup.find_all('div', 'kandcountbox')
titles = [div['title'] for div in divs]
# A list of tuples (<grade>, <number of students>) e.g. ('1.0', 3)
nums = [(ts[-1].split()[0], int(ts[-2].split()[0])) for t in titles if (ts := t.split(','))]
d.update((grade, 0) for grade in all_possible_grades) # Initialize every grade key, even if 0 students received that grade.
for i, t in enumerate(titles):
if 'Nicht erschienen' in t: # Students who did not show up
d['did_not_show_up'] = nums[i][1]
elif '5.0' in t: # add up fails and cheats together.
d['5.0'] += nums[i][1]
# We already counted all the 5.0s and added them, so don't add again.
d.update((tup for tup in nums if tup[0] != '5.0'))
except FileNotFoundError:
print("No statistics file for ", d['course_name_en'])
dicts.remove(d)
# Create a pandas dataframe with the data:
# In[5]:
import pandas as pd
df = pd.DataFrame(dicts)
df['did_show_up'] = df[possible_grades].sum(axis=1)
df['numeric_grade'] = pd.to_numeric(df['grade'])
df['int_grade_X10'] = df['grade'].apply(lambda x: int((x.replace('.', '') + '0')[:2]))
df['5.0_with_noshows'] = df['5.0'] + df['did_not_show_up']
df['total_students'] = df['did_show_up'] + df['did_not_show_up']
df['grade'] = df['grade'].apply(lambda s: (s + '.0')[:3])
grades_with_noshows = possible_grades + ['5.0_with_noshows']
grades_with_noshows.remove('5.0')
with pd.option_context('display.max_rows', None, 'display.max_columns', None)
# -*- coding: UTF-8 -*-
# Export gacha (wish) history by HansenH
ADDRESS = 'https://hk4e-api.mihoyo.com/event/gacha_info/api/getGachaLog'
TIMESLEEP = 0.2
PAGE_SIZE = 6
import json
import time
from datetime import datetime
import requests
import pandas
from urllib import parse
def export_log(url):
resolved = dict(parse.parse_qsl(parse.urlsplit(url.strip()).query))
params = {
'authkey_ver': resolved['authkey_ver'],
'sign_type': resolved['sign_type'],
'auth_appid': resolved['auth_appid'],
'init_type': resolved['init_type'],
'gacha_id': resolved['gacha_id'],
'timestamp': resolved['timestamp'],
'lang': resolved['lang'],
'device_type': resolved['device_type'],
'ext': resolved['ext'],
'game_version': resolved['game_version'],
'region': resolved['region'],
'authkey': resolved['authkey'],
'game_biz': resolved['game_biz'],
'gacha_type': '',
'page': '1',
'size': str(PAGE_SIZE),
'end_id': '0'
}
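    # The endpoint is paged: `page`/`size` select a window and `end_id` works as a cursor
    # (the id of the last record already fetched); each loop below keeps requesting pages
    # until one comes back with fewer than PAGE_SIZE records.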
# Character event wishes
character_wishes = []
params['gacha_type'] = '301'
params['page'] = '1'
params['end_id'] = '0'
while True:
r = requests.get(ADDRESS, params=params)
if r.status_code != 200:
print('ERROR')
return
response = json.loads(r.text)
if response['message'].upper() != 'OK':
print('ERROR')
return
character_wishes += response['data']['list']
if len(response['data']['list']) < PAGE_SIZE:
break
params['page'] = str(int(params['page']) + 1)
params['end_id'] = response['data']['list'][-1]['id']
time.sleep(TIMESLEEP)
print('Character event wishes:', len(character_wishes))
print(character_wishes)
# Weapon event wishes
weapon_wishes = []
params['gacha_type'] = '302'
params['page'] = '1'
params['end_id'] = '0'
while True:
r = requests.get(ADDRESS, params=params)
if r.status_code != 200:
print('ERROR')
return
response = json.loads(r.text)
if response['message'].upper() != 'OK':
print('ERROR')
return
weapon_wishes += response['data']['list']
if len(response['data']['list']) < PAGE_SIZE:
break
params['page'] = str(int(params['page']) + 1)
params['end_id'] = response['data']['list'][-1]['id']
time.sleep(TIMESLEEP)
print('Weapon event wishes:', len(weapon_wishes))
print(weapon_wishes)
# Standard (permanent) wishes
standard_wishes = []
params['gacha_type'] = '200'
params['page'] = '1'
params['end_id'] = '0'
while True:
r = requests.get(ADDRESS, params=params)
if r.status_code != 200:
print('ERROR')
return
response = json.loads(r.text)
if response['message'].upper() != 'OK':
print('ERROR')
return
standard_wishes += response['data']['list']
if len(response['data']['list']) < PAGE_SIZE:
break
params['page'] = str(int(params['page']) + 1)
params['end_id'] = response['data']['list'][-1]['id']
time.sleep(TIMESLEEP)
print('Standard wishes:', len(standard_wishes))
print(standard_wishes)
# Novice (beginner) wishes
novice_wishes = []
params['gacha_type'] = '100'
params['page'] = '1'
params['end_id'] = '0'
while True:
r = requests.get(ADDRESS, params=params)
if r.status_code != 200:
print('ERROR')
return
response = json.loads(r.text)
if response['message'].upper() != 'OK':
print('ERROR')
return
novice_wishes += response['data']['list']
if len(response['data']['list']) < PAGE_SIZE:
break
params['page'] = str(int(params['page']) + 1)
params['end_id'] = response['data']['list'][-1]['id']
time.sleep(TIMESLEEP)
print('Novice wishes:', len(novice_wishes))
print(novice_wishes)
# Organize the data into DataFrames
data1 = []
for i in range(len(character_wishes)-1, -1, -1):
data1.append({
'类型': character_wishes[i]['item_type'],
'名称': character_wishes[i]['name'],
'星级': character_wishes[i]['rank_type'],
'时间': character_wishes[i]['time']
})
df1 = pandas.DataFrame(data1, columns=('类型', '名称', '星级', '时间'))
data2 = []
for i in range(len(weapon_wishes)-1, -1, -1):
data2.append({
'类型': weapon_wishes[i]['item_type'],
'名称': weapon_wishes[i]['name'],
'星级': weapon_wishes[i]['rank_type'],
'时间': weapon_wishes[i]['time']
})
df2 = pandas.DataFrame(data2, columns=('类型', '名称', '星级', '时间'))
data3 = []
for i in range(len(standard_wishes)-1, -1, -1):
data3.append({
'类型': standard_wishes[i]['item_type'],
'名称': standard_wishes[i]['name'],
'星级': standard_wishes[i]['rank_type'],
'时间': standard_wishes[i]['time']
})
df3 = pandas.DataFrame(data3, columns=('类型', '名称', '星级', '时间'))
##################################### PLA ALGORITHM - ON RAW DATA WITH 256 FEATURES AND ALSO MAKING PREDICTIONS ON TEST RAW DATA ##################################3
import numpy as np
from random import choice
from numpy import array, dot, random
import matplotlib.pyplot as plt
import pandas as pd
import sklearn
# The data was originally read from the internet as shown below and written to a local CSV file, to reduce this code's dependency on internet access.
# train = pd.read_csv('http://amlbook.com/data/zip/zip.train', delimiter=' ', header=None)
# train.to_csv('train.csv', index=False)
# Load CSV file from the local machine
train = pd.read_csv('C:/Users/syedd/train.csv', delimiter=',', header='infer')
train.shape
del train['257']
train.shape
print("WORKING WITH THE RAW DATA THAT HAS 256 FEATURES CONTAINING GREY SCALE VALUES FOR 256 PIXELS")
print("============================================================================================ \n \n")
print("PLA ALGORITHM")
print("=============\n \n")
print("Training set has {0[0]} rows and {0[1]} columns".format(train.shape))
# Store the labels in a vector. Here trainlabels is 'y' in our formula
trainlabels = train['0']
#Labels should be integer, not float numbers
trainlabels=trainlabels.astype(int)
del train['0']
# The data should be represented in matrix form
train.shape
traindata = np.asmatrix(train.loc[:,:])
traindata.shape
# Visualising a digit using the 256 grey scale values of the 16 by 16 pixels
#Taking a sample row
samplerow = traindata[21:22]
#reshape it to 16*16 grid
samplerow = np.reshape(samplerow,(16,16))
print("A sample digit from the dataset:")
plt.imshow(samplerow, cmap="hot")
plt.show()
# Initialize the weight matrix
weights = np.zeros((10,256))
print(weights.shape)
################ PLA ALGORITHM##############################
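# Multiclass perceptron update used below: score each sample against every class's weight
# vector, guess the argmax, and on a mistake subtract the sample from the guessed class's
# weights while adding it to the true class's weights.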
E = 15
errors = []
for epoch in range(E):
err = 0 #reset the error
# For each handwritten digit in training set,
for i, x in enumerate(traindata):
dp=[] #create a new container for the calculated dot products
# For each digit class (0-9)
for w in weights:
dotproduct = np.dot(x,w)
#take the dot product of the weight and the data
dp.append(dotproduct)
#add the dot product to the list of dot products
guess = np.argmax(dp)
#take the largest dot product and make that the guessed digit class
actual = trainlabels[i]
# If the guess was wrong, update the weight vectors
if guess != actual:
weights[guess] = weights[guess] - x #update the incorrect (guessed) weight vector
weights[actual] = weights[actual] + x #update the correct weight vector
err += 1
errors.append(err/7291) # 7291 training samples; track the error rate after each pass through the training set
plt.plot(list(range(0,E)),errors) #plot the error after all training epochs
plt.title('Error Rate while using PLA Algorithm')
plt.show()
################################################ TEST DATA ###############################################
##Reading from internet
# test = pd.read_csv('http://amlbook.com/data/zip/zip.test', delimiter=' ', header=None)
# test.to_csv('test.csv', index=False)
test = pd.read_csv('C:/Users/syedd/test.csv', delimiter=',', header='infer')
test.shape
del test['257']
test.shape
print("Test set has {0[0]} rows and {0[1]} columns".format(test.shape))
testlabels = test['0']
testlabels=testlabels.astype(int)
actualvalues=test['0']
del test['0']
test.shape
testdata = np.asmatrix(test.loc[:,:])
testdata.shape
guesses = []
for i, z in enumerate(testdata):
dp=[]
for w in weights:
dotproduct = np.dot(z,w)
dp.append(dotproduct)
guess = np.argmax(dp)
guesses.append(guess)
print("Preparing the Predictions file with the guesses")
Prediction = pd.DataFrame(data= {'Prediction': guesses})
Prediction.shape
Prediction.to_csv('predictions_On_Test_Data_Using_PLA_Algorithm.csv', index=False)
from sklearn.metrics import mean_squared_error
from math import sqrt
print("Error Rate for the PLA Algorithm: {}".format(sqrt(mean_squared_error(testlabels, guesses))))
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(testlabels, guesses)
print("Accuracy for the PLA Algorithm:")
print(accuracy)
print("\n")
##################################### POCKET PLA ALGORITHM - ON RAW DATA WITH 256 FEATURES AND ALSO MAKING PREDICTIONS ON TEST RAW DATA ##################################3
import numpy as np
from random import choice
from numpy import array, dot, random
import matplotlib.pyplot as plt
import pandas as pd
#Reading from Internet
# train = pd.read_csv('http://amlbook.com/data/zip/zip.train', delimiter=' ', header=None)
# train.to_csv('train.csv', index=False)
train = pd.read_csv('C:/Users/syedd/train.csv', delimiter=',', header='infer')
# Functionality for fitting models and forming predictions
import os
import numpy as np
import pandas as pd
from multiprocessing import Pool, cpu_count
from functools import partial
from sklearn.preprocessing import scale
def apply_parallel(df_grouped, func, num_cores=cpu_count(), **kwargs):
"""Apply func to each group dataframe in df_grouped in parallel
Args:
df_grouped: output of grouby applied to pandas DataFrame
func: function to apply to each group dataframe in df_grouped
num_cores: number of CPU cores to use
kwargs: additional keyword args to pass to func
"""
# Associate only one OpenMP thread with each core
os.environ['OMP_NUM_THREADS'] = str(1)
pool = Pool(num_cores)
# Pass additional keyword arguments to func using partial
ret_list = pool.map(partial(func, **kwargs), [group for name, group in df_grouped])
pool.close()
# Unset environment variable
del os.environ['OMP_NUM_THREADS']
return pd.concat(ret_list)
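# Minimal usage sketch (the dataframe, group key and `my_func` below are hypothetical):
#   def my_func(group_df, scale=1.0):
#       group_df['value'] = group_df['value'] * scale
#       return group_df
#   result = apply_parallel(df.groupby('key'), my_func, scale=2.0)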
def fit_and_visualize(df, x_cols=None, last_train_date=None,
model=None):
"""Fits model to training set and returns standardized coefficients
for visualization. No train-test split or holdout is performed.
Args:
df: Dataframe with columns 'year', 'start_date', 'lat', 'lon',
x_cols, 'target', 'sample_weight'
x_cols: Names of columns used as input features
last_train_date: Last date to use in training
model: sklearn-compatible model with fit and predict methods
Returns standardized regression coefficients.
"""
train_df = df.loc[df.start_date <= last_train_date].dropna(
subset=x_cols+['target','sample_weight'])
# Fit model and return coefficients
fit_model = model.fit(X = scale(train_df[x_cols]),
y = train_df['target'],
sample_weight = train_df['sample_weight'].values)  # .values instead of the removed .as_matrix()
return pd.DataFrame([fit_model.coef_], columns=x_cols)
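# Hedged usage sketch (the model and column names are assumptions, not from this module):
#   from sklearn.linear_model import Ridge
#   coefs = fit_and_visualize(df, x_cols=['feat_a', 'feat_b'],
#                             last_train_date='2010-12-31', model=Ridge(alpha=1.0))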
# -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
"""
import numpy as np
import pandas as pd
from hypernets.core.callbacks import SummaryCallback, FileLoggingCallback
from hypernets.core.searcher import OptimizeDirection
from hypernets.searchers.random_searcher import RandomSearcher
from sklearn.model_selection import train_test_split
from deeptables.datasets import dsutils
from deeptables.models.deeptable import DeepTable
from deeptables.models.hyper_dt import HyperDT
from deeptables.models.hyper_dt import mini_dt_space, default_dt_space
from .. import homedir
class Test_HyperDT():
def bankdata(self):
rs = RandomSearcher(mini_dt_space, optimize_direction=OptimizeDirection.Maximize, )
hdt = HyperDT(rs,
callbacks=[SummaryCallback(), FileLoggingCallback(rs, output_dir=f'{homedir}/hyn_logs')],
reward_metric='accuracy',
max_trails=3,
dnn_params={
'hidden_units': ((256, 0, False), (256, 0, False)),
'dnn_activation': 'relu',
},
)
df = dsutils.load_bank()
df.drop(['id'], axis=1, inplace=True)
df_train, df_test = train_test_split(df, test_size=0.2, random_state=42)
y = df_train.pop('y')
y_test = df_test.pop('y')
hdt.search(df_train, y, df_test, y_test)
assert hdt.best_model
best_trial = hdt.get_best_trail()
estimator = hdt.final_train(best_trial.space_sample, df_train, y)
score = estimator.predict(df)
result = estimator.evaluate(df, y)
assert len(score) == 100
assert result
assert isinstance(estimator.model, DeepTable)
def test_default_dt_space(self):
space = default_dt_space()
space.random_sample()
assert space.Module_DnnModule_1.param_values['dnn_layers'] == len(
space.DT_Module.config.dnn_params['hidden_units'])
assert space.Module_DnnModule_1.param_values['hidden_units'] == \
space.DT_Module.config.dnn_params['hidden_units'][0][
0]
assert space.Module_DnnModule_1.param_values['dnn_dropout'] == \
space.DT_Module.config.dnn_params['hidden_units'][0][
1]
assert space.Module_DnnModule_1.param_values['use_bn'] == space.DT_Module.config.dnn_params['hidden_units'][0][
2]
def test_hyper_dt(self):
rs = RandomSearcher(mini_dt_space, optimize_direction=OptimizeDirection.Maximize, )
hdt = HyperDT(rs,
callbacks=[SummaryCallback()],
reward_metric='accuracy',
dnn_params={
'hidden_units': ((256, 0, False), (256, 0, False)),
'dnn_activation': 'relu',
},
cache_preprocessed_data=True,
cache_home=homedir + '/cache'
)
x1 = np.random.randint(0, 10, size=(100), dtype='int')
x2 = np.random.randint(0, 2, size=(100)).astype('str')
x3 = np.random.randint(0, 2, size=(100)).astype('str')
x4 = np.random.normal(0.0, 1.0, size=(100))
y = np.random.randint(0, 2, size=(100), dtype='int')
df = pd.DataFrame({'x1': x1, 'x2': x2, 'x3': x3, 'x4': x4})
import ast
import json
import pandas as pd
from skorecard.reporting import build_bucket_table
from skorecard.apps.app_utils import determine_boundaries, is_increasing, is_sequential
from skorecard.utils.exceptions import NotInstalledError
# Dash + dependencies
try:
from dash.dependencies import Input, Output, State
from dash import no_update
import dash_table
except ModuleNotFoundError:
Input = NotInstalledError("dash", "dashboard")
Output = NotInstalledError("dash", "dashboard")
State = NotInstalledError("dash", "dashboard")
dash_table = NotInstalledError("dash_table", "dashboard")
def add_bucketing_callbacks(self, X, y):
"""
Adds callbacks to the interactive bucketing app.
Meant for normal bucketers, not two step BucketingProcess.
"""
app = self.app
add_common_callbacks(self)
@app.callback(
[Output("input_map", "value")],
[
Input("input_column", "value"),
],
)
def update_input_map(col):
"""Update bucketer map."""
input_map = self.features_bucket_mapping_.get(col).map
col_type = self.features_bucket_mapping_.get(col).type
if col_type == "categorical":
# We also allow for treating numerical as categoricals
# if key is a string, we'll need to quote them
if isinstance(list(input_map.keys())[0], str):
str_repr = ",\n\t".join([f"'{k}': {v}" for k, v in input_map.items()])
else:
str_repr = ",\n\t".join([f"{k}: {v}" for k, v in input_map.items()])
str_repr = f"{{\n\t{str_repr}\n}}"
else:
str_repr = str(input_map)
return [str_repr]
@app.callback(
[Output("input_map_helptext", "children")],
[
Input("input_column", "value"),
],
)
def update_input_map_feedback(col):
col_type = self.features_bucket_mapping_.get(col).type
right = self.features_bucket_mapping_.get(col).right
if col_type == "categorical":
msg = "Edit the prebucket mapping dictionary, e.g. {'value' : 'pre-bucket'}"
if col_type == "numerical" and right:
msg = "Edit the prebucket mapping boundaries. "
msg += "Values up to and including the boundary are put into a bucket (right=True)"
if col_type == "numerical" and not right:
msg = "Edit the prebucket mapping boundaries. "
msg += "Values up to but not including the boundary are put into a bucket (right=False)"
return [msg]
@app.callback(
[
Output("bucket_table", "data"),
Output("graph-bucket", "figure"),
Output("input_map", "invalid"),
Output("input_map_feedback", "children"),
],
[Input("input_map", "value")],
[State("input_column", "value")],
)
def get_bucket_table(input_map, col):
"""Loads the table and the figure, when the input_map changes."""
col_type = self.features_bucket_mapping_.get(col).type
# Load the object from text input into python object
if col_type == "numerical":
try:
input_map = json.loads(input_map)
assert len(input_map) > 0
except Exception:
msg = "Make sure the input is properly formatted as a list"
return no_update, no_update, True, [msg]
# validate input
if not is_increasing(input_map):
return no_update, no_update, True, ["Make sure the list values are in increasing order"]
else:
try:
# note using ast.literal_eval is not safe
# for use when you don't trust the user input
# in this case, it's a local user using his/her own kernel
# note: we're using literal_eval because JSON enforces quoted keys
input_map = ast.literal_eval(input_map)
# re-sort on value, key
input_map = dict(sorted(input_map.items(), key=lambda x: (x[1], x[0])))
except Exception:
msg = "Make sure the input is properly formatted as a dictionary"
return no_update, no_update, True, [msg]
# validate input
if not min(input_map.values()) == 0:
msg = "Dictionary values (buckets) must start at 0"
return no_update, no_update, True, [msg]
if not is_sequential(list(input_map.values())):
msg = "Dictionary values (buckets) must be sequentially increasing with steps of 1"
return no_update, no_update, True, [msg]
# Update the fit for this specific column
special = self.features_bucket_mapping_.get(col).specials
right = self.features_bucket_mapping_.get(col).right
# Note we passed X, y to add_bucketing_callbacks() so they are available here.
# make sure to re-generate the summary table
self._update_column_fit(
X=X, y=y, feature=col, special=special, splits=input_map, right=right, generate_summary=True
)
# Retrieve the new bucket tables and plots
table = self.bucket_table(col)
# unsupervised bucketers don't have an event rate.
if "Event Rate" in table.columns:
table["Event Rate"] = round(table["Event Rate"] * 100, 2)
fig = self.plot_bucket(col)
# remove title from plot
fig.update_layout(title="")
return table.to_dict("records"), fig, False, no_update
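# For reference, the two `input_map` shapes accepted by the callback above (values illustrative):
#   numerical:   "[0.0, 1.5, 3.0]"            -> boundary list, must be in increasing order
#   categorical: "{'A': 0, 'B': 0, 'C': 1}"   -> buckets must start at 0 and increase by steps of 1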
def add_bucketing_process_callbacks(self, X, y):
"""
Adds callbacks to the interactive bucketing app.
Meant for the two-step BucketingProcess.
"""
app = self.app
add_common_callbacks(self)
@app.callback(
[Output("bucketingprocess-helptext", "style")],
[
Input("input_column", "value"),
],
)
def update_sidebar_helptext(col):
return [{"display": "block"}]
@app.callback(
[Output("input_map", "value")],
[
Input("input_column", "value"),
],
)
def update_input_map(col):
"""Update bucketer map."""
input_map = self.pre_pipeline_.features_bucket_mapping_.get(col).map
col_type = self.pre_pipeline_.features_bucket_mapping_.get(col).type
if col_type == "categorical":
# We also allow for treating numerical as categoricals
# if key is a string, we'll need to quote them
if isinstance(list(input_map.keys())[0], str):
str_repr = ",\n\t".join([f"'{k}': {v}" for k, v in input_map.items()])
else:
str_repr = ",\n\t".join([f"{k}: {v}" for k, v in input_map.items()])
str_repr = f"{{\n\t{str_repr}\n}}"
else:
str_repr = str(input_map)
return [str_repr]
@app.callback(
[Output("input_map_helptext", "children")],
[
Input("input_column", "value"),
],
)
def update_input_map_feedback(col):
col_type = self.pre_pipeline_.features_bucket_mapping_.get(col).type
right = self.pre_pipeline_.features_bucket_mapping_.get(col).right
if col_type == "categorical":
msg = "Edit the prebucket mapping dictionary, e.g. {'value' : 'pre-bucket'}"
if col_type == "numerical" and right:
msg = "Edit the prebucket mapping boundaries. "
msg += "Values up to and including the boundary are put into a bucket (right=True)"
if col_type == "numerical" and not right:
msg = "Edit the prebucket mapping boundaries. "
msg += "Values up to but not including the boundary are put into a bucket (right=False)"
return [msg]
@app.callback(
[
Output("pre_bucket_table", "data"),
Output("input_map", "invalid"),
Output("input_map_feedback", "children"),
],
[Input("input_map", "value")],
[State("input_column", "value")],
)
def get_prebucket_table(input_map, col):
"""Loads the table and the figure, when the input_map changes."""
col_type = self.pre_pipeline_.features_bucket_mapping_.get(col).type
# Load the object from text input into python object
if col_type == "numerical":
try:
input_map = json.loads(input_map)
assert len(input_map) > 0
except Exception:
msg = "Make sure the input is properly formatted as a list"
return no_update, True, [msg]
# validate input
if not is_increasing(input_map):
return no_update, True, ["Make sure the list values are in increasing order"]
else:
try:
# note using ast.literal_eval is not safe
# for use when you don't trust the user input
# in this case, it's a local user using his/her own kernel
input_map = ast.literal_eval(input_map)
# re-sort on value, key
input_map = dict(sorted(input_map.items(), key=lambda x: (x[1], x[0])))
except Exception:
msg = "Make sure the input is properly formatted as a dictionary"
return no_update, True, [msg]
# validate input
if not min(input_map.values()) == 0:
msg = "Dictionary values (buckets) must start at 0"
return no_update, True, [msg]
if not is_sequential(list(input_map.values())):
msg = "Dictionary values (buckets) must be sequentially increasing with steps of 1"
return no_update, True, [msg]
# Update the fit for this specific column
# Note we passed X, y to add_bucketing_process_callbacks() so they are available here.
# make sure to re-generate the summary table
for step in self.pre_pipeline_.steps:
if col in step[1].variables:
step[1]._update_column_fit(
X=X,
y=y,
feature=col,
special=self._prebucketing_specials.get(col, {}),
splits=input_map,
right=self.pre_pipeline_.features_bucket_mapping_.get(col).right,
generate_summary=True,
)
self.prebucket_tables_[col] = build_bucket_table(
X, y, column=col, bucket_mapping=self.pre_pipeline_.features_bucket_mapping_.get(col)
)
# Re-calculate the BucketingProcess summary
self._generate_summary(X, y)
# Retrieve the new bucket tables and plots
table = self.prebucket_table(col)
# unsupervised bucketers don't have an event rate.
if "Event Rate" in table.columns:
table["Event Rate"] = round(table["Event Rate"] * 100, 2)
return table.to_dict("records"), False, no_update
@app.callback(
[
Output("bucket_table", "data"),
Output("graph-prebucket", "figure"),
Output("graph-bucket", "figure"),
Output("bucket-error-msg", "children"),
Output("bucket-error-msg", "style"),
],
[Input("pre_bucket_table", "data")],
[State("input_column", "value")],
)
def get_bucket_table(prebucket_table, col):
# Get the input from the prebucket table
new_buckets = pd.DataFrame()
from ipywidgets import IntSlider, Box, Layout, Label, Dropdown, Button
from IPython.display import display, HTML, clear_output
from core.whose_cpp_code import classify_authors
from plotly import __version__
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.graph_objs as go
import pandas as pd
import matplotlib.pyplot as plt
import warnings
import numpy as np
import time
init_notebook_mode(connected=True)
warnings.filterwarnings("ignore")
form_layout = Layout(
display='flex',
flex_flow='column',
align_items='stretch',
width='50%'
)
form_item_layout = Layout(
display='flex',
flex_flow='row',
justify_content='space-between'
)
# loops = IntSlider(min=1, max=10)
data = Dropdown(options={'students': './matricies/students/',
'GoogleCodeJam': './matricies/GoogleCodeJam/',
'GitHub': './matricies/GitHub_short/',
'user_data': './data/matricies'})
classifier = Dropdown(options={'RandomForest': 'RandomForestClassifier',
'ExtraTrees': 'ExtraTreesClassifier',
'AdaBoost': 'AdaBoostClassifier'})
def make_metrics_bar(metrics, loops_num):
trace0 = go.Bar(
x=list(range(1, loops_num + 1)),
y=metrics['f1_score'],
name='F1-score',
marker=dict(
color='rgb(136, 142, 150)'
)
)
trace1 = go.Bar(
x=list(range(1, loops_num + 1)),
y=metrics['precision'],
name='Precision',
marker=dict(
color='rgb(204,204,204)',
)
)
trace2 = go.Bar(
x=list(range(1, loops_num + 1)),
y=metrics['recall'],
name='Recall',
marker=dict(
color='rgb(144, 177, 229)',
)
)
trace3 = go.Bar(
x=list(range(1, loops_num + 1)),
y=metrics['accuracy'],
name='Accuracy',
marker=dict(
color='rgb(49,130,189)',
)
)
data = [trace0, trace1, trace2, trace3]
layout = go.Layout(
xaxis=dict(
tickangle=-45,
title='Number of experiment',
titlefont=dict(
size=16,
color='rgb(107, 107, 107)'
),
tickfont=dict(
size=14,
color='rgb(107, 107, 107)'
)
),
yaxis=dict(
title='Value, %',
titlefont=dict(
size=16,
color='rgb(107, 107, 107)'
),
tickfont=dict(
size=14,
color='rgb(107, 107, 107)'
)
),
barmode='group',
title='Classification metrics',
)
fig = go.Figure(data=data, layout=layout)
iplot(fig, filename='metrics-bar')
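# `metrics` is expected to expose one value per experiment under each metric name, e.g.
# (illustrative numbers only): {'f1_score': [91.2, 93.0], 'precision': [...], 'recall': [...],
# 'accuracy': [...]} -- a dict of lists or a DataFrame with those columns works here.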
def make_pie(mean_accuracy):
fig = {
"data": [
{
"values": [1 - mean_accuracy, mean_accuracy],
"labels": ['Wrong predicted samples, %', 'True predictes samples, %'],
"type": "pie",
"text": "Accuracy",
"textposition":"inside",
"hole": .4,
# "domain": {"x": [.52, 1]},
}],
"layout": {
"title": 'Total mean accuracy',
"annotations": [
{
"font": {
"size": 20
},
"showarrow": False,
"text": "Accuracy",
}
]
}
}
iplot(fig)
loops = IntSlider(min=1, max=10)
form_items = [
Box([Label(value='Loops:'), loops], layout=form_item_layout),
Box([Label(value='Data:'), data], layout=form_item_layout),
Box([Label(value='Algorithm:'), classifier], layout=form_item_layout),
]
def classify_mul(b):
clear_output()
accuracy, precision, recall, f1_score = [], [], [], []
start_time = time.time()
for loop in range(loops.value):
print('Loop', loop + 1, ': please wait...')
report = classify_authors(data.value, classifier.value)
df = pd.DataFrame(report)
from scipy import stats
import re
from pathlib import Path
import numpy as np
import pandas as pd
from astropy.io import ascii
from tqdm import tqdm
RNG = np.random.RandomState(952020)
DATAPATH = Path("./data")
RESULTSPATH = Path("./results")
FIGURESPATH = Path("./paper/figures")
def get_data(dataset, **kwargs):
"""Returns the SNIDs, Age dataframe, and HR dataframe.
A row in these dataframes corresponds to one supernova, keyed by its SNID.
"""
age_df = load_age_sample_from_mcmc_chains(dataset, **kwargs)
hr_df = load_hr()
age_df, hr_df = clean_data(age_df, hr_df)
snids = age_df.index.unique().tolist()
return snids, age_df, hr_df
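# Typical call (dataset must be one of the options documented below, e.g. "campbell"):
#   snids, age_df, hr_df = get_data("campbell")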
def load_age_sample_from_mcmc_chains(dataset, mode="read", **kwargs):
"""Return a random sample of the mcmc chains dataset for all SNe available.
dataset : str {"campbell", "campbellG", "gupta"}
Dataset to load.
mode : str {"read", "write"}
If write, redo the creation of the mcmc chain sample and also write to file.
WARNING: this may take a long time sampling from ~20GB of data.
If read, the mcmc chain sample is read from cached output of the "write" mode.
**kwargs : keyword arguments
See _create_age_sample_from_mcmc_chains
"""
dataset_path = DATAPATH / "mcmc_chains" / dataset
if mode == "write":
assert dataset_path.exists(), \
f"{dataset} tarball not extracted or not found in {dataset_path}"
return _create_age_sample_from_mcmc_chains(dataset, dataset_path, **kwargs)
if mode == "read":
return pd.read_table(DATAPATH / f"{dataset}_samples.tsv")
def _create_age_sample_from_mcmc_chains(dataset, dataset_path,
sample_size=10000,
random_state=RNG,
min_pvalue=0.05):
def get_downsample_efficient(sn_chain_path):
"""DEPRECATED"""
# Get number of rows in file
# Header takes up 2 rows
nheaders = 2
nrows = sum(1 for line in open(sn_chain_path)) - nheaders
skiprows = [1] + sorted(
random_state.choice(
range(nheaders, nrows + nheaders), nrows - sample_size, replace=False
)
)
_df = pd.read_table(
sn_chain_path, skiprows=skiprows, usecols=[7], index_col=False
)
return _df
dfs = []
all_dataset_files = list(dataset_path.glob("*.tsv"))
for i, sn_chain_path in tqdm(
enumerate(all_dataset_files),
total=len(all_dataset_files), desc="Downsampling",
):
all_df = pd.read_table(sn_chain_path,
skiprows=[1],
usecols=[7],
index_col=False)
similar = False
while not similar:
downsample_df = all_df.sample(n=sample_size, replace=False)
ks = stats.ks_2samp(all_df['age'], downsample_df['age'])
similar = ks.pvalue >= min_pvalue
if not similar:
print(f"KS p-value too small, resampling {sn_chain_path.name}")
# Record the SNID (parsed from the filename) on every row
snid = re.findall(r"SN(\d+)_", sn_chain_path.name)[0]
downsample_df["snid"] = [snid] * len(downsample_df)
dfs.append(downsample_df)
df = pd.concat(dfs)
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'].fillna(2, inplace=True)
df_2012_2013['prcab'].fillna(2, inplace=True)
df_2014_2015['prcab'].fillna(2, inplace=True)
df_2016_2017['prcab'].fillna(2, inplace=True)
df_2018_2019['prcab'].fillna(2, inplace=True)
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
def groupby_siteid():
df2010 = df_2010.groupby('siteid')['siteid'].count().reset_index(name='2010_total')
df2011 = df_2011.groupby('siteid')['siteid'].count().reset_index(name='2011_total')
df2012 = df_2012.groupby('siteid')['siteid'].count().reset_index(name='2012_total')
df2013 = df_2013.groupby('siteid')['siteid'].count().reset_index(name='2013_total')
df2014 = df_2014.groupby('siteid')['siteid'].count().reset_index(name='2014_total')
df2015 = df_2015.groupby('siteid')['siteid'].count().reset_index(name='2015_total')
df2016 = df_2016.groupby('siteid')['siteid'].count().reset_index(name='2016_total')
df2017 = df_2017.groupby('siteid')['siteid'].count().reset_index(name='2017_total')
df2018 = df_2018.groupby('siteid')['siteid'].count().reset_index(name='2018_total')
df2019 = df_2019.groupby('siteid')['siteid'].count().reset_index(name='2019_total')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 =pd.merge(df5, df2016, on='siteid', how='outer')
df7 =pd.merge(df6, df2017, on='siteid', how='outer')
df8 =pd.merge(df7, df2018, on='siteid', how='outer')
df_sum_all_Years =pd.merge(df8, df2019, on='siteid', how='outer')
df_sum_all_Years.fillna(0,inplace=True)
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_sum'] = df_sum_all_Years['Year_sum']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
avg_siteid['num_of_years'] = df_sum_all_Years['Distinct_years']
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 =pd.merge(df2010, df2011, on='siteid', how='outer')
df2 =pd.merge(df1, df2012, on='siteid', how='outer')
df3 =pd.merge(df2, df2013, on='siteid', how='outer')
df4 =pd.merge(df3, df2014, on='siteid', how='outer')
df5 =pd.merge(df4, df2015, on='siteid', how='outer')
df6 = | pd.merge(df5, df2016, on='siteid', how='outer') | pandas.merge |
from unittest import TestCase
import pandas as pd
import scripts.vars as my_vars
from scripts.utils import update_confusion_matrix
class TestUpdateConfusionMatrix(TestCase):
"""Tests update_confusion_matrix() from utils.py"""
def test_update_confusion_matrix(self):
"""Tests that TP, FN, FP, TN are updated correctly"""
my_vars.conf_matrix = {my_vars.TP: {0}, my_vars.FP: {2}, my_vars.TN: {1}, my_vars.FN: set()}
positive_class = "apple"
class_col_name = "Class"
examples = [
pd.Series({"A": "low", "B": (1, 1), "C": (3, 3), "Class": "apple"}, name=3),
pd.Series({"A": "low", "B": (1, 1), "C": (3, 3), "Class": "banana"}, name=4),
pd.Series({"A": "low", "B": (1, 1), "C": (3, 3), "Class": "apple"}, name=5),
pd.Series({"A": "low", "B": (1, 1), "C": (3, 3), "Class": "banana"}, name=6),
]
rules = [
pd.Series({"A": "low", "B": (1, 1), "C": (3, 3), "Class": "apple"}, name=0),
| pd.Series({"A": "low", "B": (1, 1), "C": (2, 2), "Class": "banana"}, name=1) | pandas.Series |
import re
import pandas as pd
import numpy as np
from tqdm import tqdm
from mlflow.tracking import MlflowClient
from pathlib import Path
from typing import Set, Dict, Any, List, Optional
class MlflowHelper:
def __init__(
self,
tracking_uri: str = "http://localhost:5000",
local_mlflow_dir_prefix: str = "../gsim01/mlruns/",
experiment_name: str = "Domain Guided Monitoring",
experiment_id: Optional[str] = "1",
pkl_file: Optional[Path] = None,
):
self.mlflow_client = MlflowClient(tracking_uri=tracking_uri)
self.experiment_id = experiment_id if experiment_id is not None else self.mlflow_client.get_experiment_by_name(experiment_name).experiment_id
self.local_mlflow_dir = local_mlflow_dir_prefix + str(self.experiment_id) + "/"
if pkl_file is not None and pkl_file.exists():
self.run_df = pd.read_pickle("mlflow_run_df.pkl")
print("Initialized with", len(self.run_df), "MLFlow runs from pkl")
else:
self.run_df = pd.DataFrame(columns=["info_run_id"])
self.metric_history_names: Set[str] = set()
def query_valid_runs(self,
pkl_file: Optional[Path] = None,
valid_sequence_types: List[str] = ['mimic', 'huawei_logs'],
filter_string_suffix: Optional[str] = " and params.ModelConfigrnn_type = 'gru'"):
for sequence_type in valid_sequence_types:
filter_string = "tags.sequence_type = '" + sequence_type + "'"
if filter_string_suffix is not None:
filter_string = filter_string + filter_string_suffix
self.query_runs(filter_string=filter_string)
print("Queried", len(self.run_df), "runs from MLFlow for", sequence_type)
if pkl_file is not None:
self.run_df.to_pickle(pkl_file)
def query_runs(self, filter_string: Optional[str] = None, pkl_file: Optional[Path] = None,):
runs = self.mlflow_client.search_runs(
experiment_ids=[self.experiment_id], max_results=10000, filter_string=filter_string,
)
for run in tqdm(runs, desc="Querying data per run..."):
self._handle_run(run)
if pkl_file is not None:
self.run_df.to_pickle(pkl_file)
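# Illustrative usage of this helper (a sketch; the tracking URI and experiment
# id are assumptions that depend on the local MLflow setup):
# helper = MlflowHelper(pkl_file=Path("mlflow_run_df.pkl"))
# helper.query_valid_runs(pkl_file=Path("mlflow_run_df.pkl"))
# mimic_runs = helper.mimic_run_df()
# huawei_runs = helper.huawei_run_df()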
def _handle_run(self, run):
if (
len(self.run_df) > 0
and run.info.run_id in set(self.run_df["info_run_id"])
and run.info.status == "FINISHED"
and len(
self.run_df[
(self.run_df["info_run_id"] == run.info.run_id)
& (self.run_df["info_status"] == run.info.status)
]
)
== 1
):
return
if not run.info.status == "FINISHED" and not run.info.run_id in set(
self.run_df["info_run_id"]
):
return
run_dict = {
(k + "_" + sk): v
for k, sd in run.to_dictionary().items()
for sk, v in sd.items()
}
final_run_dict = {
(k + "_" + sk): v
for k, sd in run_dict.items()
if type(sd) == type(dict())
for sk, v in sd.items()
}
final_run_dict.update(
{k: v for k, v in run_dict.items() if not (type(v) == type(dict()))}
)
if (
final_run_dict.get("data_tags_model_type", "") == "causal"
and final_run_dict.get(
"data_params_KnowledgeConfigadd_causality_prefix", "False"
)
== "True"
):
final_run_dict["data_tags_model_type"] = "causal2"
if (
(final_run_dict.get("data_tags_model_type", "") == "causal"
or final_run_dict.get("data_tags_model_type", "") == "causal2")
and final_run_dict.get("data_tags_sequence_type", "") == "huawei_logs"
and final_run_dict.get("data_params_HuaweiPreprocessorConfiglog_only_causality", "") == "True"
):
final_run_dict["data_tags_model_type"] = final_run_dict["data_tags_model_type"] + "_logonly"
if (
final_run_dict.get("data_tags_model_type", "") == "text"
and final_run_dict.get(
"data_params_KnowledgeConfigbuild_text_hierarchy", "False"
)
== "True"
):
final_run_dict["data_tags_model_type"] = "text_hierarchy"
if (
final_run_dict.get("data_tags_model_type", "") == "gram"
and final_run_dict.get("data_tags_sequence_type", "") == "huawei_logs"
and final_run_dict.get("data_params_KnowledgeConfigadd_causality_prefix")
and final_run_dict.get(
"data_params_HuaweiPreprocessorConfiguse_log_hierarchy", "False"
)
== "True"
):
final_run_dict["data_tags_model_type"] = "gram_logs"
self.run_df = self.run_df.append(
final_run_dict, ignore_index=True
).drop_duplicates(subset=["info_run_id"], keep="last", ignore_index=True)
def mimic_run_df(
self, include_noise: bool = False, include_refinements: bool = False,
risk_prediction: bool = False,
valid_x_columns: List[str]=["level_0"],
valid_y_columns: List[str]=["level_3"],
) -> pd.DataFrame:
mimic_run_df = self.run_df[
(self.run_df["data_tags_sequence_type"] == "mimic")
& (self.run_df["data_params_ModelConfigrnn_type"] == "gru")
& (self.run_df["data_params_SequenceConfigtest_percentage"].fillna("").astype(str) == "0.2")
& (self.run_df["data_params_ModelConfigbest_model_metric"] == "val_loss")
& (self.run_df["info_status"] == "FINISHED")
& (self.run_df["data_params_ModelConfigrnn_dim"] == "200")
& (self.run_df["data_params_ModelConfigoptimizer"].fillna("adam") == "adam")
& (self.run_df["data_params_ModelConfigdropout_rate"].fillna("0.0").astype(str) == "0.5")
& (self.run_df["data_params_ModelConfigrnn_dropout"].fillna("0.0").astype(str) == "0.0")
& (self.run_df["data_params_ModelConfigkernel_regularizer_scope"].fillna("[]") == "[]")
& (self.run_df["data_params_SequenceConfigpredict_full_y_sequence_wide"].astype(str).fillna("") == "True")
& (
(
(self.run_df["data_params_SequenceConfigy_sequence_column_name"].astype(str) == "level_3")
& (self.run_df["data_params_ExperimentConfigbatch_size"].astype(str).fillna("") == "128")
) |
(
(self.run_df["data_params_SequenceConfigy_sequence_column_name"].astype(str) == "level_2")
& (self.run_df["data_params_ExperimentConfigbatch_size"].astype(str).fillna("") == "16")
)
)
& (self.run_df["data_params_MimicPreprocessorConfigreplace_keys"].fillna("[]") == "[]")
]
if risk_prediction:
mimic_run_df = mimic_run_df[
(mimic_run_df["data_tags_task_type"] == "risk_prediction") &
(mimic_run_df["data_params_ModelConfigfinal_activation_function"] == "sigmoid")
]
else:
mimic_run_df = mimic_run_df[
(mimic_run_df["data_params_ModelConfigfinal_activation_function"] == "softmax")
& (mimic_run_df["data_params_SequenceConfigflatten_y"] == "True")
]
if len(valid_x_columns) > 0:
mimic_run_df = mimic_run_df[
mimic_run_df["data_params_SequenceConfigx_sequence_column_name"].apply(lambda x: x in valid_x_columns)
]
if len(valid_y_columns) > 0:
mimic_run_df = mimic_run_df[
mimic_run_df["data_params_SequenceConfigy_sequence_column_name"].apply(lambda x: x in valid_y_columns)
]
if not include_noise:
mimic_run_df = mimic_run_df[
(mimic_run_df["data_tags_noise_type"].fillna("").apply(len) == 0)
]
if not include_refinements:
mimic_run_df = mimic_run_df[
(mimic_run_df["data_tags_refinement_type"].fillna("") == "")
]
return mimic_run_df
def huawei_run_df(
self, include_noise: bool = False, include_refinements: bool = False,
risk_prediction: bool = False,
valid_x_columns: List[str]=["log_cluster_template", "fine_log_cluster_template"],
valid_y_columns: List[str]=["attributes"],
include_drain_hierarchy: bool=False,
) -> pd.DataFrame:
huawei_run_df = self.run_df[
(self.run_df["data_tags_sequence_type"] == "huawei_logs")
& (self.run_df["data_params_ModelConfigrnn_type"] == "gru")
& (self.run_df["data_params_SequenceConfigtest_percentage"].fillna("").astype(str) == "0.1")
& (self.run_df["data_params_ModelConfigbest_model_metric"] == "val_loss")
& (self.run_df["info_status"] == "FINISHED")
& (self.run_df["data_params_ModelConfigrnn_dim"] == "200")
& (self.run_df["data_params_ModelConfigoptimizer"].fillna("adam") == "adam")
& (self.run_df["data_params_ModelConfigdropout_rate"].fillna("0.0").astype(str) == "0.5")
& (self.run_df["data_params_ModelConfigrnn_dropout"].fillna("0.0").astype(str) == "0.0")
& (self.run_df["data_params_ModelConfigkernel_regularizer_scope"].fillna("[]") == "[]")
& (self.run_df["data_params_ExperimentConfigbatch_size"].astype(str).fillna("") == "128")
& (
(self.run_df["data_params_HuaweiPreprocessorConfigfine_drain_log_st"].astype(str).fillna("") == "0.75")
| (self.run_df["data_params_HuaweiPreprocessorConfigdrain_log_st"].astype(str).fillna("") == "0.75")
)
& (
(self.run_df["data_params_HuaweiPreprocessorConfigfine_drain_log_depth"].astype(str).fillna("") == "10")
| (self.run_df["data_params_HuaweiPreprocessorConfigdrain_log_depth"].astype(str).fillna("") == "10")
)
& (
(~ (
(self.run_df["data_params_SequenceConfigx_sequence_column_name"].astype(str).fillna("") == "coarse_log_cluster_template")
| (self.run_df["data_params_SequenceConfigy_sequence_column_name"].astype(str).fillna("") == "coarse_log_cluster_template")
| (self.run_df["data_params_HuaweiPreprocessorConfigdrain_log_sts"].fillna("[]").astype(str).apply(len) > 2)
)) | (
(self.run_df["data_params_HuaweiPreprocessorConfigcoarse_drain_log_st"].astype(str).fillna("") == "0.2")
& (self.run_df["data_params_HuaweiPreprocessorConfigcoarse_drain_log_depth"].astype(str).fillna("") == "4")
)
)
]
if risk_prediction:
huawei_run_df = huawei_run_df[
(huawei_run_df["data_tags_task_type"] == "risk_prediction") &
(huawei_run_df["data_params_ModelConfigfinal_activation_function"] == "sigmoid")
]
else:
huawei_run_df = huawei_run_df[
(huawei_run_df["data_params_ModelConfigfinal_activation_function"] == "softmax")
& (huawei_run_df["data_params_SequenceConfigflatten_y"] == "True")
]
if len(valid_x_columns) > 0:
huawei_run_df = huawei_run_df[
huawei_run_df["data_params_SequenceConfigx_sequence_column_name"].apply(lambda x: x in valid_x_columns)
]
if len(valid_y_columns) > 0:
huawei_run_df = huawei_run_df[
huawei_run_df["data_params_SequenceConfigy_sequence_column_name"].apply(lambda x: x in valid_y_columns)
]
if not include_noise:
huawei_run_df = huawei_run_df[
(huawei_run_df["data_tags_noise_type"].fillna("").apply(len) == 0)
]
if not include_refinements:
huawei_run_df = huawei_run_df[
(huawei_run_df["data_tags_refinement_type"].fillna("") == "")
& (huawei_run_df["data_params_HuaweiPreprocessorConfigmin_causality"].fillna(0.0).astype(str) == "0.01")
]
if not include_drain_hierarchy:
huawei_run_df = huawei_run_df[
huawei_run_df["data_params_HuaweiPreprocessorConfigdrain_log_sts"].fillna("[]").astype(str).apply(len) <= 2
]
return huawei_run_df
def _load_metrics_from_local(self, run_id: str) -> Optional[Dict[str, List[float]]]:
local_run_dir = Path(self.local_mlflow_dir + "/" + run_id + "/metrics/")
if not local_run_dir.exists() or not local_run_dir.is_dir():
return None
metric_dict: Dict[str, List[float]] = {}
for metric_file in local_run_dir.iterdir():
metric = metric_file.name
metric_history = pd.read_csv(metric_file, sep=" ", names=["time", "value", "step"]).to_dict(orient='index')
metric_dict[metric+"_history"] = [x["value"] for x in sorted(metric_history.values(), key=lambda x: x["step"])]
metric_dict[metric+"_times"] = [x["time"] for x in sorted(metric_history.values(), key=lambda x: x["step"])]
return metric_dict
def _load_metrics_from_remote(self, run_id: str) -> Dict[str, List[float]]:
run = self.mlflow_client.get_run(run_id)
metric_dict: Dict[str, Any] = {}
for metric in run.data.metrics.keys():
metric_history = self.mlflow_client.get_metric_history(
run.info.run_id, metric
)
metric_dict[metric + "_history"] = [
metric.value
for metric in sorted(metric_history, key=lambda x: x.step)
]
metric_dict[metric + "_times"] = [
metric.time
for metric in sorted(metric_history, key=lambda x: x.step)
]
return metric_dict
def load_metric_history_for_ids(
self, run_ids: Set[str],
):
metric_records = []
for run_id in tqdm(run_ids, desc="Querying metrics for runs"):
metric_dict = self._load_metrics_from_local(run_id=run_id)
if metric_dict is None:
metric_dict = self._load_metrics_from_remote(run_id=run_id)
for metric, metric_history in metric_dict.items():
for epoch in range(len(metric_history)):
metric_records.append({
"run_id": run_id,
metric: metric_history[epoch],
"epoch": epoch,
})
return pd.merge(
pd.DataFrame.from_records(metric_records), self.run_df, left_on="run_id", right_on="info_run_id", how="left"
)
def load_training_times_for_ids(
self, run_ids: Set[str], reference_metric_name: str = "val_loss_times"
):
metric_records = []
for run_id in tqdm(run_ids, desc="Querying metrics for runs"):
metric_dict = self._load_metrics_from_local(run_id=run_id)
if metric_dict is None or reference_metric_name not in metric_dict:
metric_dict = self._load_metrics_from_remote(run_id=run_id)
if reference_metric_name not in metric_dict:
print("Error! Reference Metric not in metric_dict", reference_metric_name, run_id)
continue
times = [int(x) for x in metric_dict[reference_metric_name]]
metric_records.append({
"run_id": run_id,
"num_epochs": len(times),
"total_duration": max(times) - min(times),
"avg_per_epoch": (max(times) - min(times)) / len(times),
})
return pd.merge(
| pd.DataFrame.from_records(metric_records) | pandas.DataFrame.from_records |
import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#grab stuff from corrected and curated
#get list of filenames
##########################
#folderlistlabels=['WashU_HCAorBoth','WashU_HCD', 'UCLA_HCAorBoth','UCLA_HCD', 'UMN_HCAorBoth','UMN_HCD', 'MGH_HCAorBoth','Harvard_HCD']
#folderlistnums= [82804729845, 82804015457,82807223120, 82805124019, 82803665867, 82805151056,82761770877, 82803734267]
#Harvard
Harv=82803734267
Harvattn=96013516511
MGH2=82761770877
MGHattn=96148925420
WashUD=82804015457
WashUDattn=96147128675
WashUA=82804729845
WashUAattn=96149947498
UMNA=82803665867
UMNAattn=96153923311
UMND=82805151056
UMNDattn=96155708581
UCLAA=82807223120
UCLAAattn=96154919803
UCLAD=82805124019
UCLADattn=96162759127
harvcleandata, harvcleanscore=curatedandcorrected(Harv,Harvattn)
mghcleandata, mghcleanscore=curatedandcorrected(MGH2,MGHattn)
washudcleandata,washudcleanscore=curatedandcorrected(WashUD,WashUDattn)
washuacleandata,washuacleanscore=curatedandcorrected(WashUA,WashUAattn)
umnacleandata,umnacleanscore=curatedandcorrected(UMNA,UMNAattn)
umndcleandata,umndcleanscore=curatedandcorrected(UMND,UMNDattn)
uclaacleandata,uclaacleanscore=curatedandcorrected(UCLAA,UCLAAattn)
ucladcleandata,ucladcleanscore=curatedandcorrected(UCLAD,UCLADattn)
###stopped here
harvcleandata.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
#box.update_file(497579203898,box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
harvcleanscore.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#box.update_file(497530866864,box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
mghcleandata.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
mghcleanscore.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#update box files by hand
washudcleandata.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washudcleanscore.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
washuacleandata.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washuacleanscore.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umnacleandata.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umnacleanscore.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umndcleandata.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umndcleanscore.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
uclaacleandata.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
uclaacleanscore.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
ucladcleandata.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
ucladcleanscore.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
harvcleandata=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleandata=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleandata=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleandata=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleandata=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleandata=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleandata=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleandata= | pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False) | pandas.read_csv |
import os
import re
import sys
import glob
import logging
import argparse
from pathlib import Path
from collections import Counter
import pandas as pd
import adsapi
def read_bib(filename):
"""Read bib from the tex file
separate the bibitem into cite, key, bib
"\bibitem[cite]{key} bib"
Args:
filename (string): file name
Returns:
df (DataFrame): bib data
"""
bib_items = list()
with open(filename) as f:
bib_tag = False
bib_lines = list()
for line in f:
if "\\end{thebibliography}" in line:
bib_tag = False
item = "".join(bib_lines)
if item != "":
bib_items.append(item)
if bib_tag == True:
if line.strip() != "":
if "\\bibitem[" in line:
if len(bib_lines) > 0:
bib_items.append(" ".join(bib_lines))
bib_lines = [line.strip()]
else:
bib_lines.append(line.strip())
if "\\begin{thebibliography}" in line:
bib_tag = True
info_list = list()
pd.options.mode.chained_assignment = None
if len(bib_items) > 0:
for bib_item in bib_items:
info_list.append(extract_info(bib_item))
df = pd.DataFrame(info_list)
else:
df = pd.DataFrame(
columns=[
"au1_f",
"au1_l",
"au2_f",
"au2_l",
"au3_f",
"au3_l",
"au1_f_low",
"au1_l_low",
"au2_f_low",
"au2_l_low",
"au3_f_low",
"au3_l_low",
"bib",
"cite",
"key",
"num",
"year",
]
)
return df
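# Example call (a sketch; "manuscript.tex" is a placeholder for any tex file
# containing a thebibliography environment):
# bib_df = read_bib("manuscript.tex")
# bib_df[["key", "cite", "bib"]].head()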
def extract_info(bib_item):
"""Extract info from bib_item
Args:
bib_item (string): bib item
Returns:
info (dict): info dictionary
"""
info = dict()
info["cite"] = bib_item.split("\\bibitem[")[1].split("]")[0]
info["key"] = (
bib_item.split("\\bibitem[")[1].split("]")[1].split("{")[1].split("}")[0]
)
bib = (
bib_item.split("\\bibitem[")[1]
.split("]")[1][bib_item.split("\\bibitem[")[1].split("]")[1].find("}") + 1 :]
.strip()
)
if bib[-1] == ".":
info["bib"] = bib[:-1]
else:
info["bib"] = bib
item = | pd.Series(info) | pandas.Series |
import pandas as pd
import re
from tqdm import tqdm
import xml.etree.ElementTree as ET
from pathlib import Path
def get_citations(doc_type, citation_cols, doc_id, doc_title, doc_author, doc_root):
if doc_type != 'research-article':
return pd.DataFrame()
xml_cits = doc_root.findall('back/fn-group/fn/p/mixed-citation')
citation_block = {}
for i in range(len(xml_cits)):
try:
cit_author = xml_cits[i].find('person-group/string-name/surname').text
except AttributeError:
cit_author = ''
try:
cit_title = xml_cits[i].find('source').text
except AttributeError:
cit_title = ''
try:
cit_year = xml_cits[i].find('year').text
except AttributeError:
cit_year = ''
try:
cit_reference = xml_cits[i].text
except AttributeError:
cit_reference = ''
citation = (doc_id, doc_title, doc_author, cit_author, cit_title, cit_year, cit_reference)
citation_block[i] = citation
block_df = | pd.DataFrame.from_dict(citation_block, columns=citation_cols, orient='index') | pandas.DataFrame.from_dict |
import numpy as np
import os
import tensorflow as tf
import pathlib
import pandas as pd
import re
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten , Conv1D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D,MaxPooling1D
from tensorflow.keras.utils import plot_model
tf.config.run_functions_eagerly(True)
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 16
img_height = 224
img_width = 224
final_class = 8
image_channels = 3
class_names = [
"Normal (N)",
"Diabetes (D)",
"Glaucoma (G)",
"Cataract (C)",
"Age related Macular Degeneration (A)",
"Hypertension (H)",
"Pathological Myopia (M)",
"Other diseases/abnormalities (O)"
]
def file_exists(file_path):
[exists] = tf.py_function(_file_exists, [file_path], [tf.bool])
exists.set_shape([])
return exists
def _file_exists(file_path):
return tf.io.gfile.exists(file_path.numpy())
def build_label_dictionary(df):
dict = {}
for index, row in df.iterrows():
image_id = int(row['ID'])
image_target = np.asarray(eval(row["target"]))
dict[image_id] = image_target
return dict
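# Sketch of the expected input, inferred from the loop above: full_df.csv
# (read below) is assumed to carry an integer 'ID' column and a stringified
# label vector in 'target', e.g. "[0, 1, 0, 0, 0, 0, 0, 0]", so that
# build_label_dictionary(df)[image_id] returns a NumPy array of 8 class flags.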
data_dir = pathlib.Path("./input/ocular-disease-recognition-odir5k/preprocessed_images/")
df = | pd.read_csv('./input/ocular-disease-recognition-odir5k/full_df.csv') | pandas.read_csv |
""" test partial slicing on Series/Frame """
import pytest
from datetime import datetime, date
import numpy as np
import pandas as pd
import operator as op
from pandas import (DatetimeIndex, Series, DataFrame,
date_range, Index, Timedelta, Timestamp)
from pandas.util import testing as tm
class TestSlicing(object):
def test_slice_year(self):
dti = DatetimeIndex(freq='B', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
result = s['2005']
expected = s[s.index.year == 2005]
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
result = df.loc['2005']
expected = df[df.index.year == 2005]
tm.assert_frame_equal(result, expected)
rng = date_range('1/1/2000', '1/1/2010')
result = rng.get_loc('2009')
expected = slice(3288, 3653)
assert result == expected
def test_slice_quarter(self):
dti = DatetimeIndex(freq='D', start=datetime(2000, 6, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2001Q1']) == 90
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['1Q01']) == 90
def test_slice_month(self):
dti = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(dti)), index=dti)
assert len(s['2005-11']) == 30
df = DataFrame(np.random.rand(len(dti), 5), index=dti)
assert len(df.loc['2005-11']) == 30
tm.assert_series_equal(s['2005-11'], s['11-2005'])
def test_partial_slice(self):
rng = DatetimeIndex(freq='D', start=datetime(2005, 1, 1), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-05':'2006-02']
expected = s['20050501':'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-05':]
expected = s['20050501':]
tm.assert_series_equal(result, expected)
result = s[:'2006-02']
expected = s[:'20060228']
tm.assert_series_equal(result, expected)
result = s['2005-1-1']
assert result == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31')
def test_partial_slice_daily(self):
rng = DatetimeIndex(freq='H', start=datetime(2005, 1, 31), periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-31']
tm.assert_series_equal(result, s.iloc[:24])
pytest.raises(Exception, s.__getitem__, '2004-12-31 00')
def test_partial_slice_hourly(self):
rng = DatetimeIndex(freq='T', start=datetime(2005, 1, 1, 20, 0, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60 * 4])
result = s['2005-1-1 20']
tm.assert_series_equal(result, s.iloc[:60])
assert s['2005-1-1 20:00'] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:15')
def test_partial_slice_minutely(self):
rng = DatetimeIndex(freq='S', start=datetime(2005, 1, 1, 23, 59, 0),
periods=500)
s = Series(np.arange(len(rng)), index=rng)
result = s['2005-1-1 23:59']
tm.assert_series_equal(result, s.iloc[:60])
result = s['2005-1-1']
tm.assert_series_equal(result, s.iloc[:60])
assert s[Timestamp('2005-1-1 23:59:00')] == s.iloc[0]
pytest.raises(Exception, s.__getitem__, '2004-12-31 00:00:00')
def test_partial_slice_second_precision(self):
rng = DatetimeIndex(start=datetime(2005, 1, 1, 0, 0, 59,
microsecond=999990),
periods=20, freq='US')
s = Series(np.arange(20), rng)
tm.assert_series_equal(s['2005-1-1 00:00'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:00:59'], s.iloc[:10])
tm.assert_series_equal(s['2005-1-1 00:01'], s.iloc[10:])
tm.assert_series_equal(s['2005-1-1 00:01:00'], s.iloc[10:])
assert s[Timestamp('2005-1-1 00:00:59.999990')] == s.iloc[0]
tm.assert_raises_regex(KeyError, '2005-1-1 00:00:00',
lambda: s['2005-1-1 00:00:00'])
def test_partial_slicing_dataframe(self):
# GH14856
# Test various combinations of string slicing resolution vs.
# index resolution
# - If string resolution is less precise than index resolution,
# string is considered a slice
# - If string resolution is equal to or more precise than index
# resolution, string is considered an exact match
formats = ['%Y', '%Y-%m', '%Y-%m-%d', '%Y-%m-%d %H',
'%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S']
resolutions = ['year', 'month', 'day', 'hour', 'minute', 'second']
for rnum, resolution in enumerate(resolutions[2:], 2):
# we check only 'day', 'hour', 'minute' and 'second'
unit = Timedelta("1 " + resolution)
middate = datetime(2012, 1, 1, 0, 0, 0)
index = DatetimeIndex([middate - unit,
middate, middate + unit])
values = [1, 2, 3]
df = DataFrame({'a': values}, index, dtype=np.int64)
assert df.index.resolution == resolution
# Timestamp with the same resolution as index
# Should be exact match for Series (return scalar)
# and raise KeyError for Frame
for timestamp, expected in zip(index, values):
ts_string = timestamp.strftime(formats[rnum])
# make ts_string as precise as index
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == expected
pytest.raises(KeyError, df.__getitem__, ts_string)
# Timestamp with resolution less precise than index
for fmt in formats[:rnum]:
for element, theslice in [[0, slice(None, 1)],
[1, slice(1, None)]]:
ts_string = index[element].strftime(fmt)
# Series should return slice
result = df['a'][ts_string]
expected = df['a'][theslice]
tm.assert_series_equal(result, expected)
# Frame should return slice as well
result = df[ts_string]
expected = df[theslice]
tm.assert_frame_equal(result, expected)
# Timestamp with resolution more precise than index
# Compatible with existing key
# Should return scalar for Series
# and raise KeyError for Frame
for fmt in formats[rnum + 1:]:
ts_string = index[1].strftime(fmt)
result = df['a'][ts_string]
assert isinstance(result, np.int64)
assert result == 2
pytest.raises(KeyError, df.__getitem__, ts_string)
# Not compatible with existing key
# Should raise KeyError
for fmt, res in list(zip(formats, resolutions))[rnum + 1:]:
ts = index[1] + Timedelta("1 " + res)
ts_string = ts.strftime(fmt)
pytest.raises(KeyError, df['a'].__getitem__, ts_string)
pytest.raises(KeyError, df.__getitem__, ts_string)
def test_partial_slicing_with_multiindex(self):
# GH 4758
# partial string indexing with a multi-index buggy
df = DataFrame({'ACCOUNT': ["ACCT1", "ACCT1", "ACCT1", "ACCT2"],
'TICKER': ["ABC", "MNP", "XYZ", "XYZ"],
'val': [1, 2, 3, 4]},
index=date_range("2013-06-19 09:30:00",
periods=4, freq='5T'))
df_multi = df.set_index(['ACCOUNT', 'TICKER'], append=True)
expected = DataFrame([
[1]
], index=Index(['ABC'], name='TICKER'), columns=['val'])
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1')]
tm.assert_frame_equal(result, expected)
expected = df_multi.loc[
(pd.Timestamp('2013-06-19 09:30:00', tz=None), 'ACCT1', 'ABC')]
result = df_multi.loc[('2013-06-19 09:30:00', 'ACCT1', 'ABC')]
tm.assert_series_equal(result, expected)
# this is a KeyError as we don't do partial string selection on
# multi-levels
def f():
df_multi.loc[('2013-06-19', 'ACCT1', 'ABC')]
pytest.raises(KeyError, f)
# GH 4294
# partial slice on a series mi
s = pd.DataFrame(np.random.rand(1000, 1000), index=pd.date_range(
'2000-1-1', periods=1000)).stack()
s2 = s[:-1].copy()
expected = s2['2000-1-4']
result = s2[pd.Timestamp('2000-1-4')]
tm.assert_series_equal(result, expected)
result = s[pd.Timestamp('2000-1-4')]
expected = s['2000-1-4']
tm.assert_series_equal(result, expected)
df2 = pd.DataFrame(s)
expected = df2.xs('2000-1-4')
result = df2.loc[pd.Timestamp('2000-1-4')]
| tm.assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# Standard library imports
import json
from urllib.request import urlopen
# Third party imports
import pandas as pd
import plotly.express as px
import plotly.graph_objs as go
from plotly.graph_objs import Layout
from plotly.offline import plot
# Local application imports
from . import getdata
# file for creation of plotly figures(figs)
# you can use the plotly builtin fig.show() method to plot locally
def total_growth():
"""[summary] Plots cumulative growth in a logarithmic y-scale
Reference: https://plotly.com/python/line-and-scatter/
Returns:
[plotly.graph_objs] -- [plot_div compatible with Django]
"""
df = getdata.realtime_growth()
dates = | pd.to_datetime(df.index) | pandas.to_datetime |
"""
test_subcomp_c.py
Contains the test class for subcomp_c_multi_model_stats.py.
"""
import time
import glob
import unittest
import cftime
import pandas as pd
import xarray as xr
import numpy as np
from phase1_data_wrangler.analysis_parameters import \
DIR_TESTING_DATA, VARIABLE_ID
from phase1_data_wrangler.subcomp_c_multi_model_stats import \
initialize_empty_mms_arrays, fill_empty_arrays, create_xr_dataset, \
get_scenario_fnames, read_in_fname
DATA_PATH = DIR_TESTING_DATA+'processed_model_data/'
SCENARIO = 'historical'
FNAME_TEST = 'tas_historical_CAMS-CSM1-0'
VARIABLE_NAME = VARIABLE_ID
NORMALIZED = False
NUM_CHUNKS = 20
EXP_TYPES = np.array([xr.core.dataarray.DataArray,
np.ndarray,
np.float64,
cftime._cftime.DatetimeProlepticGregorian])
[EMPTY_DSETS,
DIM_INFO, DIMS,
FILE_NAMES,
DATASETS] = initialize_empty_mms_arrays(data_path=DATA_PATH, scenario_name=SCENARIO,
num_chunks=NUM_CHUNKS, normalized=NORMALIZED)
[LATS, LONS, TIMES] = DIMS
[MULTI_MODEL_MEANS,
MULTI_MODEL_MINS,
MULTI_MODEL_MAXS,
MULTI_MODEL_STDS] = fill_empty_arrays(empty_dsets=EMPTY_DSETS, dim_info=DIM_INFO,
file_names=FILE_NAMES, datasets=DATASETS,
varname=VARIABLE_NAME, num_chunks=NUM_CHUNKS)
DS = create_xr_dataset(lats=LATS, lons=LONS, times=TIMES,
mean_vals=MULTI_MODEL_MEANS, max_vals=MULTI_MODEL_MAXS,
min_vals=MULTI_MODEL_MINS, std_vals=MULTI_MODEL_STDS)
#----------------------------------------------------------------------------------
def check_coord_names(ds_processed, ds_coords_expected):
"""Checks whether coordinate names of ds are expected names."""
coords_list = []
for coord in ds_processed.coords:
coords_list.append(coord)
return bool(set(coords_list) == set(ds_coords_expected))
def check_years(ds_processed, min_year, max_year):
"""Check that times are within range of plausible years for the output."""
first_date = ds_processed['time'].values[0]
if isinstance(first_date, np.datetime64):
first_yr = | pd.to_datetime(first_date) | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 19:35:13 2019
@author: itamar
"""
"""
The arrow of time—If you're trying to predict the future given the past (for example,
tomorrow's weather, stock movements, and so on), you should not randomly shuffle
your data before splitting it, because doing so will create a temporal leak: your model
will effectively be trained on data from the future. In such situations, you should
always make sure all data in your test set is posterior to the data in the training set.

Next steps: add every day of the year so that the quarters can be handled exactly,
then add the fundamentals data, and test the methods both with all days and with
only some of them.
"""
import numpy as np
import pandas as pd
import pickle
#import functions as ms
import testing as ts
from collections import Counter
from sklearn.preprocessing import StandardScaler, RobustScaler
from sklearn.model_selection import cross_validate
from sklearn import svm, neighbors
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Dense, LSTM
from sklearn.preprocessing import MinMaxScaler
import xgboost as xgb
main = pd.DataFrame()
#main = ts.test_compiled_dataframe('construção')
train_test , prediction,dataset = ts.test_preprocessed_dataframe('construção')
train_test = train_test.sample(frac=1)
y = train_test['180d']
X = train_test.drop(columns = ['180d'])
scaler = MinMaxScaler(feature_range=(0,1))
X = scaler.fit_transform(X)
date = pd.DataFrame(pd.to_datetime(train_test.index).year - 2011)
X = np.append(X,date,axis = 1 )
from sklearn.model_selection import train_test_split
X_train,X_test, y_train, y_test = train_test_split(X,y,test_size = 0.25)
X_train = X_train.reshape(1,18194,91)
X_test = X_test.reshape(1,6065,91)
model = Sequential()
model.add(LSTM(50, input_shape=(18194,91)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, batch_size=1, epochs=1)
"""
xgb_model = xgb.XGBRegressor()
xgb_model.fit(X_train, y_train)
y_pred = xgb_model.predict(X_test)
print('Xg\n r2_score:',r2_score(y_test,y_pred),'\nvariance : ',explained_variance_score(y_test,y_pred))
"""
from sklearn.ensemble import RandomForestRegressor
regressor2 = RandomForestRegressor(n_estimators=10,n_jobs=-1)
regressor2.fit(X_train,y_train)
y_pred2 = regressor2.predict(X_test)
from sklearn.metrics import r2_score, explained_variance_score,SCORERS
#print('Decision tree\n r2_score:',r2_score(y_test,y_pred),'\nvariance : ',explained_variance_score(y_test,y_pred))
print('Random Forest\n r2_score:',r2_score(y_test,y_pred2),'\nvariance : ',explained_variance_score(y_test,y_pred2))
from sklearn.model_selection import cross_val_score,cross_val_predict,cross_validate
accuracies = cross_val_score(estimator = regressor2, X = X, y = y, cv = 10,n_jobs=-1,scoring='r2')
accuracies.mean()
#sorted(SCORERS.keys())
scaler = MinMaxScaler(feature_range=(0,1))
#X_prediction = prediction.drop(columns = ['91d'])
X_prediction = prediction.drop(columns = ['180d'])
X_prediction = scaler.fit_transform(X_prediction)
X_prediction = np.append(X_prediction,pd.DataFrame( | pd.to_datetime(prediction.index) | pandas.to_datetime |
"""
.. module:: trend
:synopsis: Trend Indicators.
.. moduleauthor:: <NAME> (Bukosabino)
"""
import numpy as np
import pandas as pd
from ta.utils import IndicatorMixin, ema, get_min_max
class AroonIndicator(IndicatorMixin):
"""Aroon Indicator
Identify when trends are likely to change direction.
Aroon Up = ((N - Days Since N-day High) / N) x 100
Aroon Down = ((N - Days Since N-day Low) / N) x 100
Aroon Indicator = Aroon Up - Aroon Down
https://www.investopedia.com/terms/a/aroon.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 25, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
rolling_close = self._close.rolling(self._n, min_periods=0)
self._aroon_up = rolling_close.apply(
lambda x: float(np.argmax(x) + 1) / self._n * 100, raw=True)
self._aroon_down = rolling_close.apply(
lambda x: float(np.argmin(x) + 1) / self._n * 100, raw=True)
def aroon_up(self) -> pd.Series:
"""Aroon Up Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_up = self._check_fillna(self._aroon_up, value=0)
return pd.Series(aroon_up, name=f'aroon_up_{self._n}')
def aroon_down(self) -> pd.Series:
"""Aroon Down Channel
Returns:
pandas.Series: New feature generated.
"""
aroon_down = self._check_fillna(self._aroon_down, value=0)
return pd.Series(aroon_down, name=f'aroon_down_{self._n}')
def aroon_indicator(self) -> pd.Series:
"""Aroon Indicator
Returns:
pandas.Series: New feature generated.
"""
aroon_diff = self._aroon_up - self._aroon_down
aroon_diff = self._check_fillna(aroon_diff, value=0)
return pd.Series(aroon_diff, name=f'aroon_ind_{self._n}')
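# Minimal usage sketch for the class above (assumes `df` is a price DataFrame
# with a 'Close' column):
# aroon = AroonIndicator(close=df['Close'], n=25)
# df['aroon_up'] = aroon.aroon_up()
# df['aroon_down'] = aroon.aroon_down()
# df['aroon_ind'] = aroon.aroon_indicator()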
class MACD(IndicatorMixin):
"""Moving Average Convergence Divergence (MACD)
Is a trend-following momentum indicator that shows the relationship between
two moving averages of prices.
https://school.stockcharts.com/doku.php?id=technical_indicators:moving_average_convergence_divergence_macd
Args:
close(pandas.Series): dataset 'Close' column.
n_fast(int): n period short-term.
n_slow(int): n period long-term.
n_sign(int): n period to signal.
fillna(bool): if True, fill nan values.
"""
def __init__(self,
close: pd.Series,
n_slow: int = 26,
n_fast: int = 12,
n_sign: int = 9,
fillna: bool = False):
self._close = close
self._n_slow = n_slow
self._n_fast = n_fast
self._n_sign = n_sign
self._fillna = fillna
self._run()
def _run(self):
self._emafast = ema(self._close, self._n_fast, self._fillna)
self._emaslow = ema(self._close, self._n_slow, self._fillna)
self._macd = self._emafast - self._emaslow
self._macd_signal = ema(self._macd, self._n_sign, self._fillna)
self._macd_diff = self._macd - self._macd_signal
def macd(self) -> pd.Series:
"""MACD Line
Returns:
pandas.Series: New feature generated.
"""
macd = self._check_fillna(self._macd, value=0)
return pd.Series(macd, name=f'MACD_{self._n_fast}_{self._n_slow}')
def macd_signal(self) -> pd.Series:
"""Signal Line
Returns:
pandas.Series: New feature generated.
"""
macd_signal = self._check_fillna(self._macd_signal, value=0)
return pd.Series(macd_signal, name=f'MACD_sign_{self._n_fast}_{self._n_slow}')
def macd_diff(self) -> pd.Series:
"""MACD Histogram
Returns:
pandas.Series: New feature generated.
"""
macd_diff = self._check_fillna(self._macd_diff, value=0)
return pd.Series(macd_diff, name=f'MACD_diff_{self._n_fast}_{self._n_slow}')
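# Minimal usage sketch for MACD (same assumed `df` with a 'Close' column):
# macd = MACD(close=df['Close'], n_slow=26, n_fast=12, n_sign=9)
# df['macd'] = macd.macd()
# df['macd_signal'] = macd.macd_signal()
# df['macd_hist'] = macd.macd_diff()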
class EMAIndicator(IndicatorMixin):
"""EMA - Exponential Moving Average
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 14, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
def ema_indicator(self) -> pd.Series:
"""Exponential Moving Average (EMA)
Returns:
pandas.Series: New feature generated.
"""
ema_ = ema(self._close, self._n, self._fillna)
return pd.Series(ema_, name=f'ema_{self._n}')
class TRIXIndicator(IndicatorMixin):
"""Trix (TRIX)
Shows the percent rate of change of a triple exponentially smoothed moving
average.
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:trix
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
"""
def __init__(self, close: pd.Series, n: int = 15, fillna: bool = False):
self._close = close
self._n = n
self._fillna = fillna
self._run()
def _run(self):
ema1 = ema(self._close, self._n, self._fillna)
ema2 = ema(ema1, self._n, self._fillna)
ema3 = ema(ema2, self._n, self._fillna)
self._trix = (ema3 - ema3.shift(1, fill_value=ema3.mean())) / ema3.shift(1, fill_value=ema3.mean())
self._trix *= 100
def trix(self) -> pd.Series:
"""Trix (TRIX)
Returns:
pandas.Series: New feature generated.
"""
trix = self._check_fillna(self._trix, value=0)
return | pd.Series(trix, name=f'trix_{self._n}') | pandas.Series |
#%%
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pymaid_creds import url, name, password, token
import pymaid
import connectome_tools.cluster_analysis as clust
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
rm = pymaid.CatmaidInstance(url, token, name, password)
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
# %%
# load clusters and adj
cluster_lvl = 7
clusters = clust.Analyze_Cluster(cluster_lvl=cluster_lvl-1) # 0 indexing
# set up weak and strong edge adjacency matrices
ad_adj = pm.Promat.pull_adj(type_adj='ad', subgraph='brain and accessory')
ad_adj_weak = ad_adj.copy()
ad_adj_weak[ad_adj_weak>1] = 0
ad_adj_strong = ad_adj.copy()
ad_adj_strong[ad_adj_strong<5] = 0
# pull connectivity
strong_connect = clusters.cluster_cta.connectivity(ad_adj_strong)
weak_connect = clusters.cluster_cta.connectivity(ad_adj_weak)
# %%
# generate data
strong_df = []
for i in strong_connect.index:
intra = strong_connect.loc[i, i]
inter = sum(strong_connect.loc[i, strong_connect.columns.drop(i)])
total = intra + inter
intra = intra/total
inter = inter/total
strong_df.append([i, intra, inter])
strong_df = pd.DataFrame(strong_df, columns = ['cluster', 'strong_intra', 'strong_inter'])
strong_df.set_index('cluster', drop=True, inplace=True)
weak_df = []
for i in weak_connect.index:
intra = weak_connect.loc[i, i]
inter = sum(weak_connect.loc[i, weak_connect.columns.drop(i)])
total = intra + inter
intra = intra/total
inter = inter/total
weak_df.append([i, intra, inter])
weak_df = pd.DataFrame(weak_df, columns = ['cluster', 'weak_intra', 'weak_inter'])
weak_df.set_index('cluster', drop=True, inplace=True)
data_df = pd.concat([strong_df, weak_df], axis=1)
data_df.drop(65, inplace=True) # drop last cluster which has no connectivity
data_df['cluster'] = data_df.index
plot_df = | pd.melt(data_df, id_vars='cluster', var_name='connectivity', value_name='fraction') | pandas.melt |
import torch
from torchtext.legacy import data
from torchtext.legacy.data import Field, BucketIterator
from io import BytesIO
from zipfile import ZipFile
from urllib.request import urlopen
import json
import pandas as pd
from .SeqDataset import SeqDataset
class CommonsenseQADataset(SeqDataset):
def __init__(self, data_path, seed, batch_size, device, split_ratio=[0.7, 0.3]):
# super(QuoraDataset, self).__init__(data_path, seed, batch_size, device, split_ratio)
self.split_ratio = split_ratio
self.data_path = data_path
self.seed = seed
self.device = device
self.batch_size = batch_size
self.seq_data = self.load_data(self.data_path)
def load_data(self, data_path):
# download data
if not isinstance(data_path, list):
data_path = [data_path]
# extract data
seq_data = pd.DataFrame()
for url in data_path:
resp = urlopen(url).read().decode()
data = | pd.read_json(resp,lines=True) | pandas.read_json |
import PySimpleGUI as sg
import pandas as pd
from functools import reduce
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from math import pi
from pylab import *
def user_input_GUI():
global stock_share_hash, index_hash, chart_num
layout = [
[sg.Text('Please enter Portfolio and its individual stock share', font=("Helvetica bold", 20))],
[sg.Text('Portfolio', size=(7, 1), font=("Helvetica", 16)),
sg.InputText('', key='stock', do_not_clear=True, font=("Helvetica", 16))],
[sg.Text('Share', size=(7, 1), font=("Helvetica", 16)),
sg.InputText('', key='share', do_not_clear=True, font=("Helvetica", 16))],
[sg.Text('Data Timeline:', font=("Helvetica bold", 16))],
[sg.InputCombo(('Most Recent Week', 'Most Recent Month', 'All Data'), key='time', font=("Helvetica", 16), size=(16, 1))],
[sg.Text('Number of Radar Chart (max 8):', font=("Helvetica bold", 16))],
[sg.InputText('3', key='chart', do_not_clear=True, size=(3, 1), font=("Helvetica", 16))],
[sg.Text('Indices Weight (0 - 1)', font=("Helvetica bold", 16))],
[sg.Text('SPI:', size=(4, 1), font=("Helvetica", 16)),
sg.InputText('1', key='spi_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
sg.Text('TPI:', size=(4, 1), font=("Helvetica", 16)),
sg.InputText('1', key='tpi_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
sg.Text('SLI:', size=(4, 1), font=("Helvetica", 16)),
sg.InputText('1', key='sli_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
sg.Text('PRI:', size=(4, 1), font=("Helvetica", 16)),
sg.InputText('1', key='pri_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16)),
sg.Text('ATSI:', size=(4, 1), font=("Helvetica", 16)),
sg.InputText('1', key='atsi_weight', do_not_clear=True, size=(3, 1), font=("Helvetica", 16))],
[sg.Submit('Analyze', font=("Helvetica", 16)), sg.Exit(font=("Helvetica", 16))]
]
window = sg.Window('Client Tool for Finding Optimal ATS', location=(800, 50)).Layout(layout)
while True:
event, stock_share_hash_old = window.Read()
if event is None or event == 'Exit':
break
else:
for key, value in stock_share_hash_old.items():
stock_share_hash_old.update({key: value.split(',')})
newlist = []
for value in stock_share_hash_old['share']:
newlist.append(int(value))
stock_share_hash_old.update({'share': newlist})
stock_share_hash = {}
for index in range(len(stock_share_hash_old['stock'])):
stock_share_hash[stock_share_hash_old['stock'][index].upper()] = stock_share_hash_old['share'][index]
chart_num = int(stock_share_hash_old['chart'][0])
time = stock_share_hash_old['time'][0]
index_hash = {}
index_hash.update({'spi_weight': stock_share_hash_old['spi_weight']})
# stock_share_hash.pop('spi_weight')
index_hash.update({'tpi_weight': stock_share_hash_old['tpi_weight']})
# stock_share_hash.pop('tpi_weight')
index_hash.update({'sli_weight': stock_share_hash_old['sli_weight']})
# stock_share_hash.pop('sli_weight')
index_hash.update({'pri_weight': stock_share_hash_old['pri_weight']})
# stock_share_hash.pop('pri_weight')
index_hash.update({'atsi_weight': stock_share_hash_old['atsi_weight']})
# stock_share_hash.pop('atsi_weight')
# Remove spaces in key
stock_share_hash = {k.replace(' ', ''): v for k, v in stock_share_hash.items()}
finra = subset_data(choice=time, finra_data=finra_data)
overall_score(input=stock_share_hash, finra_data=finra, sector_data=sector_data)
market_liquidity_ratio(stock_share_hash=stock_share_hash, finra_data=finra, ratio_data=ratio_data)
sg.Popup('Most Optimal ATS for Routing this Portfolio:',
stock_share_hash, score_sorted, '\n'.join(list_mlr), font=("Helvetica", 16), location=(800, 450))
window.Close()
return
def subset_data(choice, finra_data):
global week
finra_data['Week'] = pd.to_datetime(finra_data['Week'])
if choice == 'Most Recent Week':
week = 1*5
data = finra_data[finra_data['Week'] == min(finra_data.Week.unique())]
elif choice == 'Most Recent Month':
week = 4*5
data = finra_data[finra_data['Week'].isin(sorted(finra_data.Week.unique())[0:4])]
else:
data = finra_data
week = len(data.Week.unique())*5
return data
def portfolio_share_prop_index(portfolio, data):
portfolio_data = data[data['Symbol'].isin(portfolio)]
ats_list = data.ATS_MPID.unique()
hash_portfolio = {stock: [] for stock in portfolio}
for stock in portfolio:
each_stock = portfolio_data[portfolio_data['Symbol'] == stock]
stock_sum_by_ats = each_stock.groupby(['ATS_MPID'])['Shares'].sum()
model = stock_sum_by_ats / sum(stock_sum_by_ats)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_portfolio.update({stock: model.sort_values(ascending=False)})
worthfullness_index = pd.Series()
for ats in ats_list:
if ats not in worthfullness_index.index:
new_ats = pd.Series([0], index=[ats])
worthfullness_index = worthfullness_index.append(new_ats)
for stock in portfolio:
worthfullness_index += hash_portfolio[stock]
worthfullness_index_normalized = \
(worthfullness_index - min(worthfullness_index)) / (max(worthfullness_index) - min(worthfullness_index))
# worthfullness_index /= len(portfolio)
return worthfullness_index_normalized
def portfolio_trade_prop_index(portfolio, data):
portfolio_data = data[data['Symbol'].isin(portfolio)]
ats_list = data.ATS_MPID.unique()
hash_portfolio = {stock: [] for stock in portfolio}
for stock in portfolio:
each_stock = portfolio_data[portfolio_data['Symbol'] == stock]
stock_sum_by_ats = each_stock.groupby(['ATS_MPID'])['Trades'].sum()
model = stock_sum_by_ats / sum(stock_sum_by_ats)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_portfolio.update({stock: model.sort_values(ascending=False)})
worthfullness_index = pd.Series()
for ats in ats_list:
if ats not in worthfullness_index.index:
new_ats = pd.Series([0], index=[ats])
worthfullness_index = worthfullness_index.append(new_ats)
for stock in portfolio:
worthfullness_index += hash_portfolio[stock]
worthfullness_index_normalized = \
(worthfullness_index - min(worthfullness_index)) / (max(worthfullness_index) - min(worthfullness_index))
# worthfullness_index /= len(portfolio)
return worthfullness_index_normalized
# test_portfolio = ['A', 'AA']
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
# portfolio_share_prop_index(test_portfolio, data)
# a = portfolio_share_prop_index(test_portfolio, data) + portfolio_trade_prop_index(portfolio, data)
def sector_liquidity_index(portfolio, data, sector_data):
sector_list = []
sector_stock_hash = {}
hash_index = {}
ats_list = data.ATS_MPID.unique()
for stock in portfolio:
sector_list.append(sector_data.loc[sector_data['Symbol'] == stock, 'sector'].iloc[0])
sector_list = set(sector_list)
for sector in sector_list:
sector_stock_hash.update(
{sector: sector_data.loc[sector_data['sector'] == sector, 'Symbol'].values[:].tolist()})
for sector in sector_stock_hash:
portfolio_data = data[data['Symbol'].isin(sector_stock_hash[sector])]
sector_sum_by_ats = portfolio_data.groupby(['ATS_MPID'])['Shares'].sum()
model = sector_sum_by_ats / sum(sector_sum_by_ats)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_index.update({sector: model})
sl_index = pd.Series()
for ats in ats_list:
if ats not in sl_index.index:
new_ats = pd.Series([0], index=[ats])
sl_index = sl_index.append(new_ats)
for sector in sector_list:
sl_index += hash_index[sector]
sl_index_normalized = (sl_index - min(sl_index)) / (max(sl_index) - min(sl_index))
# sl_index /= len(sector_list)
return sl_index_normalized
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
# sector_data = pd.read_csv('/Users/TonY/Desktop/capstone/market_cap_sector_mktcapcategory_by_symbol.csv', encoding='utf-8')
# test_portfolio = ['A', 'AA', 'ADRO', 'AABA']
# b = sector_liquidity_index(test_portfolio, data, sector_data)
# len(b)
def participation_rate_index(hash_portfolio_share, data):
hash_par_rate_index = {}
ats_list = data.ATS_MPID.unique()
for stock in hash_portfolio_share:
data_selected = data.loc[data['Symbol'] == stock]
result = data_selected.groupby('ATS_MPID')['Shares'].sum() / week
model = hash_portfolio_share[stock] / result
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_par_rate_index.update({stock: model})
pr_index = pd.Series()
for ats in ats_list:
if ats not in pr_index.index:
new_ats = pd.Series([0], index=[ats])
pr_index = pr_index.append(new_ats)
for stock in hash_portfolio_share:
pr_index += hash_par_rate_index[stock]
pr_index_normalized = (pr_index - min(pr_index)) / (max(pr_index) - min(pr_index))
# pr_index /= len(hash_portfolio_share)
for i in range(len(pr_index_normalized)):
if pr_index_normalized[i] != 0:
pr_index_normalized[i] = 1 - pr_index_normalized[i]
return pr_index_normalized
# data = pd.read_csv("/Users/TonY/Desktop/capstone/finra.csv")
#
# hash_portfolio_share = {'A': 100, "AA": 200}
# participation_rate_index(hash_portfolio_share, data)
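# Worked note on the inversion above (added): participation_rate_index flips
# the non-zero normalised values (x -> 1 - x) so that venues where the order
# would be a *smaller* fraction of typical weekly volume score *higher*.
# E.g. (hypothetical numbers) normalised rates {ATS1: 0.8, ATS2: 0.2, ATS3: 0.0}
# become scores {ATS1: 0.2, ATS2: 0.8, ATS3: 0.0}.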
def avg_trade_size_index(hash_portfolio_share, data):
hash_par_rate_index = {}
ats_list = data.ATS_MPID.unique()
for stock in hash_portfolio_share:
data_selected = data.loc[data['Symbol'] == stock]
share_sum = data_selected.groupby('ATS_MPID')['Shares'].sum()
trade_sum = data_selected.groupby('ATS_MPID')['Trades'].sum()
model = hash_portfolio_share[stock] / ((share_sum / trade_sum) / week)
# model_normalized = (model - min(model)) / (max(model) - min(model))
for ats in ats_list:
if ats not in model.index:
new_ats = pd.Series([0], index=[ats])
model = model.append(new_ats)
hash_par_rate_index.update({stock: model})
pr_index = pd.Series()
# The source breaks off here; the remainder below mirrors
# participation_rate_index above (assumed reconstruction).
for ats in ats_list:
if ats not in pr_index.index:
new_ats = pd.Series([0], index=[ats])
pr_index = pr_index.append(new_ats)
for stock in hash_portfolio_share:
pr_index += hash_par_rate_index[stock]
pr_index_normalized = (pr_index - min(pr_index)) / (max(pr_index) - min(pr_index))
return pr_index_normalized
import pandas as pd
from pyadlml.dataset import ACTIVITY, DEVICE, START_TIME, END_TIME
from pyadlml.dataset.obj import Data
from pyadlml.dataset.devices import correct_devices
from pyadlml.dataset.activities import correct_activities, _create_activity_df
from pyadlml.dataset.io import fetch_handler as _fetch_handler
import os
MITLAB_URL = 'https://mega.nz/file/MB4BFL6S#8MjAQoS-j0Lje1UFoWUMOCay2FcdpVfla6p9MTe4SQM'
MITLAB_FILENAME = 'mitlab.zip'
def fetch_mitlab(keep_original=False, cache=True, retain_corrections=False, subject='subject1'):
"""
Fetches the :ref:`mitlab <ds_mitlab>` dataset from the internet. The original dataset or its cached version
is stored in the :ref:`data home <storage>` folder.
Parameters
----------
keep_original : bool, default=False
Determines whether the original dataset is deleted after downloading
or kept on the hard drive.
cache : bool, default=True
Determines whether the data object should be stored as a binary file for quicker access.
For more information how caching is used refer to the :ref:`user guide <storage>`.
retain_corrections : bool, default=False
When set to *true*, data points that change or drop during preprocessing
are listed in respective attributes of the data object. Fore more information
about error correction refer to the :ref:`user guide <error_correction>`.
subject : str of {'subject1', 'subject2'}, default='subject1'
Determines which of the two residents' recordings is loaded.
Returns
-------
data : object
"""
assert subject in ['subject1', 'subject2']
dataset_name = 'mitlab'
def load_mitlab(folder_path):
act_path = os.path.join(folder_path, subject, "Activities.csv")
dev_path = os.path.join(folder_path, subject, "sensors.csv")
data_path = os.path.join(folder_path, subject, "activities_data.csv")
df_dev_map = _load_device_map(dev_path)
df_act_map = _load_activity_map(act_path)
df_dev, df_act = _read_data(data_path, df_dev_map, df_act_map)
df_act, cor_lst = correct_activities(df_act)
df_dev = correct_devices(df_dev)
lst_act = df_act[ACTIVITY].unique()
lst_dev = df_dev[DEVICE].unique()
data = Data(df_act, df_dev, activity_list=lst_act, device_list=lst_dev)
data.df_dev_map = df_dev_map
data.df_act_map = df_act_map
if retain_corrections:
data.correction_activities = cor_lst
return data
data = _fetch_handler(keep_original, cache, dataset_name,
MITLAB_FILENAME, MITLAB_URL,
load_mitlab, data_postfix=subject)
return data
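# Minimal usage sketch (added; not part of the original module). The keyword
# values simply restate the documented defaults:
# data = fetch_mitlab(subject='subject1', cache=True, retain_corrections=False)
# print(data.df_act_map.head())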
def _load_device_map(path_to_file):
df_subx_dev = pd.read_csv(path_to_file, sep=",", header=None)
df_subx_dev.columns = ['id', 'room', 'device']
df_subx_dev['device'] = df_subx_dev['id'].astype(str) + ' - ' \
+ df_subx_dev['room'] + ' ' + df_subx_dev['device']
df_subx_dev = df_subx_dev.drop(columns='room')
df_subx_dev = df_subx_dev.set_index('id')
return df_subx_dev
def _load_activity_map(path_to_file):
return pd.read_csv(path_to_file, sep=",")
def _read_data(path_to_file, df_dev, df_act):
""" creates the device dataframe and activity dataframe
The data is present in the following format:
ACTIVITY_LABEL,DATE,START_TIME,END_TIME
SENSOR1_ID, SENSOR2_ID, ......
SENSOR1_OBJECT,SENSOR2_OBJECT, .....
SENSOR1_ACTIVATION_TIME,SENSOR2_ACTIVATION_TIME, .....
SENSOR1_DEACTIVATION_TIME,SENSOR2_DEACTIVATION_TIME, .....
where date is in the mm/dd/yyyy format
where time is in the hh:mm:ss format
Parameters
----------
path_to_file: str
Returns
-------
df_devices : pd.DataFrame
df_activities : pd.DataFrame
"""
# create empty dataframes for devices and activities
df_devices = pd.DataFrame(columns=[START_TIME, END_TIME, DEVICE])
df_activities = _create_activity_df()
act_list = df_act['Subcategory'].values
with open(path_to_file, 'r') as f_o:
i = 0
read_in_device = False
date = None
for line in f_o.readlines():
assert i in [0,1,2,3]
s = line.split(',')
# check if the line represents an activity
if s[0] in act_list:
assert len(s) == 4
date = s[1]
"""
there is an error where the date is 4/31/2003 which doesn't exist in
subject 2 data. Convert this to the next day
"""
if date == '4/31/2003':
date = '5/1/2003'
new_row = {'activity':s[0],
'start_time':pd.Timestamp(date +'T'+s[2]),
'end_time':pd.Timestamp(date +'T'+s[3])
}
df_activities = df_activities.append(new_row, ignore_index=True)
continue
# check if the line represents a device
elif not read_in_device:
try:
read_in_device = True
devices = s
# delete the '\n' for every last device
devices[-1:] = [devices[-1:][0][:-1]]
i = 1
continue
except:
raise ValueError
elif i == 1:
i = 2
continue
elif i == 2:
ts_act = s
i = 3
elif i == 3:
ts_deact = s
assert len(ts_act) == len(ts_deact)
# correct timestamps by inserting a 0 where only a single digit is present
for j in range(len(ts_act)):
if len(ts_act[j]) != 8:
ts = ts_act[j].split(':')
for k, digit in enumerate(ts):
if len(digit) == 1:
ts[k] = '0' + ts[k]
ts_act[j] = ':'.join(ts)
if len(ts_deact[j]) != 8:  # fixed: check the j-th timestamp, not the list length
ts = ts_deact[j].split(':')
for k, digit in enumerate(ts):
if len(digit) == 1:
ts[k] = '0' + ts[k]
ts_deact[j] = ':'.join(ts)
# create rows
for dev, ts_start, ts_end in zip(devices, ts_act, ts_deact):
new_row = {DEVICE:dev,
START_TIME:pd.Timestamp(date +'T' + ts_start),
END_TIME: pd.Timestamp(date + 'T' + ts_end)}
# The source breaks off here; appending the row, resetting the parser
# state and returning both frames is the assumed continuation implied
# by the docstring.
df_devices = df_devices.append(new_row, ignore_index=True)
i = 0
read_in_device = False
return df_devices, df_activities
#*- coding: utf-8 -*-
"""
Created on Sun Oct 9 17:37:42 2016
@author: noore
"""
from bigg import BiGG
from kegg import KEGG
import settings
import cache
import colorsys
import sys
from distutils.util import strtobool
import pandas as pd
import os
import json
import seaborn as sns
import numpy as np
from scipy.stats import gmean, ranksums
from matplotlib_venn import venn3
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.gridspec import GridSpec
from matplotlib import rcParams
import pdb # this is a reminder for Elad not to remove this pdb import
from topology import calculate_distances
import itertools
sns.set('paper', style='white')
ORGANISM = 'Escherichia coli'
STAT_TABLE_INDEX = ['all entries',
'keeping only E. coli data',
'filtering out data about mutated enzymes',
'keeping only data mapping to BiGG model',
'unique metabolite-enzyme pairs',
'unique metabolites',
'unique enzymes']
N_ACT_LABEL = 'Number of activating interactions'
N_INH_LABEL = 'Number of inhibiting interactions'
CONDITIONS = ['Glucose', 'Fructose', 'Galactose', 'Gluconate', 'Mannitol',
'Sorbitol', 'Mannose', 'Glycerol', 'Pyruvate', 'Lactate',
'Acetate', 'Succinate', 'glcNAc']
GENERAL_COLOR = '#939598'
CCM_COLOR = '#556B2f'
METABOLITE_COLOR = sns.color_palette('Set2')[3]
ACTIVATOR_COLOR = sns.color_palette('Set2')[0] # green
SUBSTRATE_COLOR = sns.color_palette(settings.HEATMAP_COLORMAP)[-1]
INHIBITOR_COLOR = sns.color_palette(settings.HEATMAP_COLORMAP)[0]
BOTH_COLOR = sns.color_palette('Set2')[5]
# Michaelis-Menten
Vmax = 1 # umol/min
Km = 1 # mM
s_range = np.logspace(-3, 3, 100) # 10 uM - 100 mM
v_s = lambda s: Vmax * s / (Km + s)
eps_s_v = lambda s: 1 - s / (Km + s)
v_x = lambda s: Vmax * (1 - s / (Km + s))
eps_x_v = lambda s: -s / (Km + s)
abs_eps_x_v = lambda s: s / (Km + s)
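# Quick sanity check of the expressions above (added note): eps_s_v(s) = Km/(Km+s)
# falls from 1 (s << Km) towards 0 (s >> Km), while eps_x_v(s) = -s/(Km+s) falls
# from 0 (s << Km) towards -1 (s >> Km); at s = Km both have magnitude 0.5.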
class FigurePlotter(object):
def __init__(self, rebuild_cache=False):
self.stat_df = pd.DataFrame(index=STAT_TABLE_INDEX,
columns=['km', 'KM_Value',
'regulation', 'KI_Value'])
self.kegg = KEGG()
self.bigg = BiGG()
self.native_mets = self.bigg.get_mets_in_cytosol()
self.native_ECs = self.bigg.get_native_EC_numbers()
self.get_data()
_fname = os.path.join(settings.RESULT_DIR, 'ecoli_interactions.csv')
self.regulation.to_csv(_fname)
def get_kinetic_param(self, name, value_col, organism=ORGANISM):
k = settings.read_cache(name)
self.stat_df[name].iat[0] = k.shape[0] # all entries
self.stat_df[value_col].iat[0] = (k[value_col] > 0).sum()
k = k[k['Organism'].str.lower() == organism.lower()]
self.stat_df[name].iat[1] = k.shape[0] # filtered by organsim
self.stat_df[value_col].iat[1] = (k[value_col] > 0).sum()
k = k[(pd.isnull(k['Commentary'])) |
((k['Commentary'].str.find('mutant') == -1) &
(k['Commentary'].str.find('mutation') == -1) &
(k['Commentary'].str.find('variant') == -1) &
(k['Commentary'].str.find('genetically engineered') == -1))]
self.stat_df[name].iat[2] = k.shape[0] # filtering mutants
self.stat_df[value_col].iat[2] = (k[value_col] > 0).sum()
# remove values with unmatched ligand
k = k[pd.notnull(k['bigg.metabolite'])]
k['bigg.metabolite'] = k['bigg.metabolite'].str.lower()
return k
def filter_non_native_interactions(self, k):
k = k[k['bigg.metabolite'].isin(self.native_mets)]
k = k[k['EC_number'].isin(self.native_ECs)]
return k
@staticmethod
def calc_sat(k, value_col, conc_df, agg_type='gmean'):
# filter missing Km or Ki values and -999 cases.
k = k[k[value_col] > 0]
# choose the minimum/median/gmean value among all repeats
k = k.groupby(['EC_number', 'bigg.metabolite'])[value_col]
if agg_type == 'minimum':
k = k.min()
elif agg_type == 'gmean':
k = k.apply(gmean)
elif agg_type == 'median':
k = k.median()
k = k.reset_index()
# join data with measured concentrations
k = k.join(conc_df, on='bigg.metabolite', how='inner')
# melt table so each line will be a combination of EC,
# substrate/inhibitor and growth condition
k = pd.melt(k, id_vars=('EC_number', 'bigg.metabolite', value_col),
var_name='growth condition', value_name='concentration')
k['saturation'] = k['concentration'] / (k['concentration'] +
k[value_col])
k['met:EC'] = k['bigg.metabolite'].str.cat(k['EC_number'], sep=':')
return k
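# Worked example of the saturation formula above (added note): with a measured
# concentration of 1 mM and K_S = 1 mM, saturation = 1 / (1 + 1) = 0.5; with
# 10 mM and the same K_S it is 10 / 11 ~ 0.91.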
@staticmethod
def calc_agg_sat(k, agg_type='median', value_col='elasticity'):
"""
calculates the [S]/K_S for all matching EC-metabolite pairs,
in log2-fold-change.
Input:
K_df - a DataFrame with three columns: EC_number,
bigg.metabolite, Value
conc_df - a DataFrame with
"""
k_grp = k.groupby(('bigg.metabolite', 'growth condition'))
if agg_type == 'median':
fc_med = k_grp.median()
elif agg_type == 'gmean':
fc_med = k_grp.agg(lambda x: gmean(list(x)))
fc_med = fc_med[[value_col]].reset_index()
fc_med = fc_med.pivot('bigg.metabolite', 'growth condition',
value_col)
return fc_med.sort_index(axis=0)
@staticmethod
def get_subsystem_data():
"""
Returns:
- 1-to-many mapping BiGG Reaction IDs to cellular subsystems
- many-to-many mapping of BiGG metabolites IDs to subsystems
"""
with open(settings.ECOLI_JSON_FNAME) as fp:
ecoli_model = json.load(fp, encoding='UTF-8')
subsystem_data = []
stoich_data = []
for r in ecoli_model['reactions']:
rid = r['id'].lower()
if 'subsystem' in r:
subsystem_data.append((rid, r['subsystem']))
if 'metabolites' in r:
for met, coeff in r['metabolites'].items():
stoich_data.append((rid, met, coeff))
reaction_subsystem_df = pd.DataFrame(
subsystem_data,
columns=('bigg.reaction', 'bigg.subsystem.reaction'))
reaction_subsystem_df.set_index('bigg.reaction', inplace=True)
stoich_df = pd.DataFrame(stoich_data,
columns=('bigg.reaction',
'bigg.metabolite', 'coeff'))
# now associate every metabolite to subsystems by joining the two
# tables
metabolite_subsystem_df = stoich_df.join(
reaction_subsystem_df, on='bigg.reaction')
metabolite_subsystem_df.rename(
columns={'bigg.subsystem.reaction': 'bigg.subsystem.metabolite'},
inplace=True)
metabolite_subsystem_df.drop('bigg.reaction', axis=1, inplace=True)
metabolite_subsystem_df.drop('coeff', axis=1, inplace=True)
metabolite_subsystem_df.drop_duplicates(inplace=True)
# keep only cytoplasmic metabolites, and remove the suffix _c
metabolite_subsystem_df = metabolite_subsystem_df[
metabolite_subsystem_df['bigg.metabolite'].str[-2:] == '_c']
# then remove the _c suffix and convert to lowercase
metabolite_subsystem_df.loc[:, 'bigg.metabolite'] = \
metabolite_subsystem_df['bigg.metabolite'].map(
lambda s: s[0:-2].lower())
return reaction_subsystem_df, metabolite_subsystem_df
def get_data(self):
_df = pd.DataFrame.from_csv(settings.ECOLI_METAB_FNAME)
mean_data_cols = sum(_df.columns.str.findall('(.*\(mean\).*)'), [])
std_data_cols = sum(_df.columns.str.findall('(.*\(std\).*)'), [])
self.met_conc_mean = _df.loc[:, mean_data_cols] # take only the data columns
self.met_conc_std = _df.loc[:, std_data_cols]
# remove the _c suffix from the compound names and convert to lowercase
self.met_conc_mean.index = self.met_conc_mean.index.map(
lambda s: s[0:-2].lower())
self.met_conc_std.index = self.met_conc_mean.index.map(
lambda s: s[0:-2].lower())
colmap = dict(map(lambda x: (x, x[:-7]), self.met_conc_mean.columns))
self.met_conc_mean.rename(columns=colmap, inplace=True)
# for legacy reasons, also calculate the km and ki tables, without
# filtering out the non-native EC reactions (in order to
# make the full heatmap)
km_raw_unfiltered = self.get_kinetic_param('km', 'KM_Value')
self.km_unfiltered_ALL = km_raw_unfiltered
self.km_unfiltered = FigurePlotter.calc_sat(
km_raw_unfiltered, 'KM_Value', self.met_conc_mean)
regulation_unfiltered = self.get_kinetic_param(
'regulation', 'KI_Value')
self.ki_unfiltered_ALL = regulation_unfiltered
ki_raw_unfiltered = regulation_unfiltered[
~pd.isnull(regulation_unfiltered['KI_Value'])]
self.ki_unfiltered = FigurePlotter.calc_sat(
ki_raw_unfiltered, 'KI_Value', self.met_conc_mean)
km_raw = self.filter_non_native_interactions(km_raw_unfiltered)
self.regulation = self.filter_non_native_interactions(
regulation_unfiltered)
self.calc_unique_stats(km_raw, 'km', 'KM_Value')
self.calc_unique_stats(self.regulation, 'regulation', 'KI_Value')
# choose only one bigg.reaction for each EC number (arbitrarily)
ec2bigg = self.bigg.reaction_df.groupby('EC_number').first()
self.km = FigurePlotter.calc_sat(km_raw, 'KM_Value',
self.met_conc_mean)
self.km['elasticity'] = 1.0 - self.km['saturation']
self.km = self.km.join(ec2bigg, on='EC_number', how='left')
self.ki = FigurePlotter.calc_sat(
self.regulation[~pd.isnull(self.regulation['KI_Value'])],
'KI_Value', self.met_conc_mean)
self.ki['elasticity'] = -self.ki['saturation']
self.ki = self.ki.join(ec2bigg, on='EC_number', how='left')
self.regulation = self.regulation.join(ec2bigg,
on='EC_number', how='left')
# write out SMRN prior to mapping to subsystems
self.regulation.to_csv(os.path.join(settings.CACHE_DIR,
'iJO1366_SMRN.csv'), index=False)
smrn_unique = self.regulation.drop(['Compound', 'LigandName'], axis=1)
smrn_unique = smrn_unique.drop_duplicates()
smrn_unique.to_csv(os.path.join(settings.RESULT_DIR,
'iJO1366_SMRN.csv'), index=False)
self.reaction_subsystem_df, self.metabolite_subsystem_df = \
FigurePlotter.get_subsystem_data()
self.regulation = self.regulation.join(self.reaction_subsystem_df,
on='bigg.reaction', how='left')
self.regulation = pd.merge(self.regulation,
self.metabolite_subsystem_df,
on='bigg.metabolite', how='left')
self.ki.to_csv(os.path.join(settings.RESULT_DIR,
'ki_saturation_full.csv'))
self.km.to_csv(os.path.join(settings.RESULT_DIR,
'km_saturation_full.csv'))
self.stat_df.drop('km', axis=1, inplace=True)
self.stat_df.to_csv(os.path.join(settings.RESULT_DIR,
'statistics.csv'))
def calc_unique_stats(self, k, name, value_col):
self.stat_df[name].iat[3] = k.shape[0]
self.stat_df[name].iat[4] = \
k.groupby(('bigg.metabolite', 'EC_number')).first().shape[0]
self.stat_df[name].iat[5] = \
k.groupby('bigg.metabolite').first().shape[0]
self.stat_df[name].iat[6] = k.groupby('EC_number').first().shape[0]
k_val = k[k[value_col] > 0]
self.stat_df[value_col].iat[3] = k_val.shape[0]
self.stat_df[value_col].iat[4] = \
k_val.groupby(('bigg.metabolite', 'EC_number')).first().shape[0]
self.stat_df[value_col].iat[5] = \
k_val.groupby('bigg.metabolite').first().shape[0]
self.stat_df[value_col].iat[6] = \
k_val.groupby('EC_number').first().shape[0]
def plot_fig4(self):
"""
Panels a-b are for testing the hypothesis that irreversible
reactions are more likely to be regulated allosterically.
Panels c-f show the difference between the distributions of
substrate-enzyme interactions and regulator-enzyme interactions
in terms of Km/Ki, saturation and elasticity.
"""
fig, axs = plt.subplots(3, 2, figsize=(6, 9))
# get the irreversibility constant (absolute) for all reactions
# in the BiGG iJO1336 model
thermo_df = pd.DataFrame.from_csv(settings.ECOLI_THERMO_CACHE_FNAME)
# remove data about reactions with std=0 (i.e. known values)
# and reactions with std > 20 (high uncertainty)
thermo_df = thermo_df[(thermo_df["dG0_prime_std"] > 0) &
(thermo_df["dG0_prime_std"] < 20)]
# select the median value of log(gamma) for each EC number
# (in general, there should be only one value for each
# EC number anyway)
irr_index_l = r"$| log_{10}(\Gamma) |$"
thermo_df[irr_index_l] = thermo_df['log10(RI)'].abs()
thermo_df = thermo_df[~pd.isnull(thermo_df.EC_number)]
# print the regulation table joined with the irreversibility values
_temp_df = self.regulation.join(thermo_df[irr_index_l],
on='EC_number')
_temp_df.to_csv(os.path.join(settings.RESULT_DIR,
'regulation_with_thermo.csv'))
# group the thermo table by EC number and subsystem, while
# taking the median value of the irreversibility index
reg_thermo_df = thermo_df.groupby(['EC_number', 'subsystem'])
reg_thermo_df = reg_thermo_df[irr_index_l].median().reset_index()
# count how many unique interaction each EC number has
# counting by metabolites (ignoring the modes)
reg_count_df = self.regulation.groupby('EC_number')['bigg.metabolite'].nunique()
reg_thermo_df = reg_thermo_df.join(reg_count_df, on='EC_number', how='left')
reg_thermo_df.fillna(0, inplace=True)
reg_thermo_df['num_regulators'] = ''
reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'] == 0, 'num_regulators'] = '0 regulators'
reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'].isin((1, 2)), 'num_regulators'] = '1-2 regulators'
reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'] > 2, 'num_regulators'] = '3+ regulators'
reg_thermo_df['Regulation'] = ''
reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'] == 0, 'Regulation'] = 'not regulated'
reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'] > 0, 'Regulation'] = 'regulated'
reg_thermo_df.to_csv(os.path.join(settings.RESULT_DIR, 'reg_thermo.csv'))
ccm_thermo_df = reg_thermo_df[
reg_thermo_df.subsystem.isin(settings.CCM_SUBSYSTEMS)]
ccm_thermo_df.to_csv(os.path.join(settings.RESULT_DIR,
'CCM_thermodynamics.csv'))
sns.set_palette('Set2', 8, 1)
ax = axs[0, 0]
FigurePlotter.comparative_cdf(x='num_regulators', y=irr_index_l,
data=reg_thermo_df, ax=ax,
title='all E. coli reactions')
ax.set_xlim(0, 10)
ax.plot([3, 3], [0, 1], 'k:', alpha=0.3, linewidth=1)
ranksum_res = ranksums(reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'] == 0, irr_index_l],
reg_thermo_df.loc[reg_thermo_df['bigg.metabolite'] > 0, irr_index_l])
ax.set_title('all E. coli reactions\n$p_{ranksum}$ < %.1g' % ranksum_res.pvalue)
ax = axs[0, 1]
FigurePlotter.comparative_cdf(x='num_regulators', y=irr_index_l,
data=ccm_thermo_df, ax=ax,
title='only CCM reactions')
ax.set_xlim(0, 10)
ax.set_ylabel('')
ax.plot([3, 3], [0, 1], 'k:', alpha=0.3, linewidth=1)
ranksum_res = ranksums(ccm_thermo_df.loc[ccm_thermo_df['bigg.metabolite'] == 0, irr_index_l],
ccm_thermo_df.loc[ccm_thermo_df['bigg.metabolite'] > 0, irr_index_l])
ax.set_title('only CCM reactions\n$p_{ranksum}$ < %.1g' % ranksum_res.pvalue)
# correlate irreversibility also with the number of references and
# unique regulating metabolites
num_refs = self.regulation.groupby(
'bigg.reaction')['Literature'].nunique()
ixrefs = num_refs.index.intersection(thermo_df.index)
thermo_df['Num_Refs'] = 0
thermo_df.loc[ixrefs, 'Num_Refs'] = num_refs.loc[ixrefs]
num_regs = self.regulation.groupby(
'bigg.reaction')['bigg.metabolite'].nunique()
ixmets = num_regs.index.intersection(thermo_df.index)
thermo_df['Num_Regs'] = 0
thermo_df.loc[ixmets, 'Num_Regs'] = num_regs.loc[ixmets]
thermo_df['is regulated'] = 'No'
thermo_df.loc[thermo_df['Num_Regs'] > 0, 'is regulated'] = 'Yes'  # .loc instead of the deprecated .ix
met_intersection = set(self.km['bigg.metabolite']).intersection(
self.ki['bigg.metabolite'])
km_inter = self.km[self.km['bigg.metabolite'].isin(met_intersection)]
ki_inter = self.ki[self.ki['bigg.metabolite'].isin(met_intersection)]
ax = axs[1, 0]
concentrations = pd.melt(self.met_conc_mean)['value']
concentrations = concentrations[~pd.isnull(concentrations)]
sns.kdeplot(np.log10(concentrations), cumulative=False, ax=ax, bw=.25,
linewidth=2, color=METABOLITE_COLOR, legend=False)
ax.set_xlim(-2.1, 2.1)
ax.set_xticks(np.arange(-2, 3, 1))
ax.set_xticklabels(['0.01', '0.1', '1', '10', '100'])
ax.set_xlabel(r'$[S]$ (in mM)')
ax.set_ylabel(r'Probability density')
ax.set_title('Measured metabolite conc.')
ax = axs[1, 1]
km_values = km_inter.groupby(('met:EC')).first()['KM_Value']
ki_values = ki_inter.groupby(('met:EC')).first()['KI_Value']
sns.kdeplot(np.log10(km_values), cumulative=False,
ax=ax, bw=.25, color=SUBSTRATE_COLOR,
label='substrates (N = %d)' % km_values.shape[0],
linewidth=2)
sns.kdeplot(np.log10(ki_values), cumulative=False,
ax=ax, bw=.25, color=INHIBITOR_COLOR,
label='inhibitors (N = %d)' % ki_values.shape[0],
linewidth=2)
ax.set_xlim(-2.1, 2.7)
ax.set_ylim(0, 0.7)
ax.set_xticks(np.arange(-2, 3, 1))
ax.set_xticklabels(['0.01', '0.1', '1', '10', '100'])
ax.set_xlabel(r'$K_S$ (in mM)')
ax.set_title(r'Measured $K_{\rm S}$ values')
ranksum_res = ranksums(km_values, ki_values)
ax.text(0.5, 0.8, '$p_{ranksum}$ < %.1g' % ranksum_res.pvalue,
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes)
ax.legend(loc='upper right')
# compare Km and Ki for the intersection of EC numbers
ax = axs[2, 0]
ki_saturation = ki_inter['saturation']
ki_saturation = ki_saturation[~pd.isnull(ki_saturation)]
km_saturation = km_inter['saturation']
km_saturation = km_saturation[~pd.isnull(km_saturation)]
sns.kdeplot(km_saturation, cumulative=False, ax=ax, bw=.1,
label='substrates (N = %d)' % km_saturation.shape[0],
linewidth=2, color=SUBSTRATE_COLOR)
sns.kdeplot(ki_saturation, cumulative=False, ax=ax, bw=.1,
label='inhibitors (N = %d)' % ki_saturation.shape[0],
linewidth=2, color=INHIBITOR_COLOR)
ax.grid(visible=False)
ax.set_xlim(0, 1)
ax.set_xticks(np.arange(0, 1.01, 0.2))
ax.set_xlabel(r'$\frac{[S]}{[S] + K_S}$')
ax.set_ylabel(r'Probability density')
ax.set_title(r'Saturation levels')
ax.legend(loc='upper center')
ranksum_res = ranksums(km_saturation, ki_saturation)
ax.text(0.5, 0.8, '$p_{ranksum}$ < 10$^{%d}$' %
np.ceil(np.log10(ranksum_res.pvalue)),
horizontalalignment='center',
verticalalignment='top',
transform=ax.transAxes)
ax = axs[2, 1]
ki_elasticity = ki_inter['elasticity'].abs()
ki_elasticity = ki_elasticity[~pd.isnull(ki_elasticity)]
km_elasticity = km_inter['elasticity'].abs()
km_elasticity = km_elasticity[~pd.isnull(km_elasticity)]
sns.kdeplot(km_elasticity, cumulative=False, ax=ax, bw=.1,
label='substrates (N = %d)' % km_saturation.shape[0],
linewidth=2, color=SUBSTRATE_COLOR)
sns.kdeplot(ki_elasticity, cumulative=False, ax=ax, bw=.1,
label='inhibitors (N = %d)' % ki_saturation.shape[0],
linewidth=2, color=INHIBITOR_COLOR)
ax.grid(visible=False)
ax.set_xlim(0, 1)
ax.set_xticks(np.arange(0, 1.01, 0.2))
ax.set_xlabel(r'$|\epsilon_s^v|$')
ax.set_title(r'Elasticities')
ax.legend(loc='upper center')
ranksum_res = ranksums(km_elasticity, ki_elasticity)
ax.text(0.5, 0.8, '$p_{ranksum}$ < 10$^{%d}$' %
np.ceil(np.log10(ranksum_res.pvalue)),
horizontalalignment='center',
verticalalignment='top',
transform=ax.transAxes)
for i, ax in enumerate(axs.flat):
ax.annotate(chr(ord('a') + i), xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=14)
fig.tight_layout()
settings.savefig(fig, 'fig4')
def plot_fig5(self):
"""
draw heat maps of the [S]/Ki and [S]/Km values across
the 8 conditions
"""
def count_values(k, value_col):
_tmp = k.groupby(('bigg.metabolite', 'growth condition'))
_tmp = _tmp.count().reset_index().groupby('bigg.metabolite').max()
return _tmp[value_col].apply(int)
fig = plt.figure(figsize=(10, 8))
gs1 = GridSpec(1, 2)
gs1.update(left=0.2, right=0.8, top=0.95, bottom=0.7, wspace=0.2)
ax1 = plt.subplot(gs1[0, 0])
ax2 = plt.subplot(gs1[0, 1])
gs2 = GridSpec(1, 1)
gs2.update(left=0.15, right=0.9, top=0.6, bottom=0.15, wspace=0.1)
ax3 = plt.subplot(gs2[0])
axs = [ax1, ax2, ax3]
ax1.annotate('A', xy=(-0.5, 1.1),
xycoords='axes fraction', ha='left', va='top',
size=10, fontweight='bold')
ax1.annotate('B', xy=(-0.5, -0.25),
xycoords='axes fraction', ha='left', va='top',
size=10, fontweight='bold')
s_range = np.logspace(-3, 3, 1000) # 10 uM - 100 mM
eps = list(map(eps_s_v, s_range))
axs[0].plot([1e-3, 1e3], [0, 0], '--', color=(0.8, 0.8, 0.8))
axs[0].scatter(s_range, eps, c=eps, cmap=settings.HEATMAP_COLORMAP,
edgecolor='none', s=15, vmin=-1, vmax=1)
eps = list(map(eps_x_v, s_range))
axs[1].plot([1e-3, 1e3], [0, 0], '--', color=(0.8, 0.8, 0.8))
axs[1].scatter(s_range, eps, c=eps, cmap=settings.HEATMAP_COLORMAP,
edgecolor='none', s=15, vmin=-1, vmax=1)
axs[0].set_title('substrates', fontsize=12)
axs[1].set_title('inhibitors', fontsize=12)
axs[0].set_xlabel('substrate conc. $s$ [mM]', fontsize=12)
axs[1].set_xlabel('inhibitor conc. $I$ [mM]', fontsize=12)
axs[0].set_ylabel('elasticity', fontsize=12)
axs[0].set_xscale('log')
axs[1].set_xscale('log')
axs[0].set_xlim(1e-3, 1e3)
axs[1].set_xlim(1e-3, 1e3)
axs[0].set_ylim(-1, 1)
axs[1].set_ylim(-1, 1)
km_sat_agg = FigurePlotter.calc_agg_sat(self.km)
ki_sat_agg = FigurePlotter.calc_agg_sat(self.ki)
# keep and reorder only the conditions that were pre-selected
km_sat_agg = km_sat_agg.loc[:, CONDITIONS]
ki_sat_agg = ki_sat_agg.loc[:, CONDITIONS]
# count how many K_M/K_I values we have for each metabolite
# (i.e. how many different EC numbers)
km_counts = count_values(self.km, 'KM_Value')
ki_counts = count_values(self.ki, 'KI_Value')
counts = pd.DataFrame([km_counts, ki_counts]).transpose()
# make a dictionary mapping from the metabolite name to the same
# name, followed by the counts (km, ki)
index_mapping = {}
for i, row in counts.iterrows():
index_mapping[i] = '%s (%g,%g)' % (str(i).upper(), row['KM_Value'],
row['KI_Value'])
km_sat_agg.to_csv(os.path.join(settings.RESULT_DIR,
'km_elasticity_agg.csv'))
ki_sat_agg.to_csv(os.path.join(settings.RESULT_DIR,
'ki_elasticity_agg.csv'))
sat_joined = km_sat_agg.join(ki_sat_agg, how='inner',
lsuffix='_sub', rsuffix='_inh')
ind = sat_joined.mean(axis=1).sort_values(axis=0,
ascending=False).index
sat_joined = sat_joined.reindex_axis(ind, axis=0)
sat_joined.rename(index=index_mapping, inplace=True)
sns.heatmap(sat_joined,
ax=axs[2], mask=sat_joined.isnull(), annot=True, fmt='.2f',
cbar=False, vmin=-1, vmax=1, cmap=settings.HEATMAP_COLORMAP,
annot_kws={'fontdict': {'fontsize': 8}})
# change xtick labels back to the original strings
# (without the suffixes) and increase the font size
axs[2].set_xticklabels(list(km_sat_agg.columns) + list(ki_sat_agg.columns),
rotation=90, fontsize=12)
# rotate the metabolite names back to horizontal, and increase
# the font size
axs[2].set_yticklabels(sat_joined.index, rotation=0, fontsize=10)
axs[2].set_xlabel('growth condition', fontsize=10)
axs[2].set_ylabel('')
axs[2].set_title('as substrates' + ' '*50 + 'as inhibitors', fontsize=12)
axs[2].axvline(sat_joined.shape[1]/2, 0, 1, color='r')
settings.savefig(fig, 'fig5')
def plot_figS5(self):
def pivot_and_sort(k, sort_by='mean'):
k_piv = k.pivot('met:EC', 'growth condition', 'elasticity')
if sort_by == 'mean':
ind = k_piv.mean(axis=1).sort_values(axis=0, ascending=True).index
elif sort_by == 'index':
ind = sorted(k_piv.index)
k_piv = k_piv.reindex_axis(ind, axis=0)
return k_piv
km = self.km
ki = self.ki
km_pivoted = pivot_and_sort(km, sort_by='index')
ki_pivoted = pivot_and_sort(ki, sort_by='index')
km_pivoted.index = km_pivoted.index.str.upper()
ki_pivoted.index = ki_pivoted.index.str.upper()
# keep and reorder only the conditions that were pre-selected
km_pivoted = km_pivoted.loc[:, CONDITIONS]
ki_pivoted = ki_pivoted.loc[:, CONDITIONS]
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(18, 30))
sns.heatmap(km_pivoted, ax=ax0, mask=km_pivoted.isnull(),
cbar=False, vmin=-1, vmax=1, cmap=settings.HEATMAP_COLORMAP, fmt='.2f')
ax0.set_xticklabels(list(km_pivoted.columns), fontsize=12, rotation=90)
ax0.set_yticklabels(km_pivoted.index, rotation=0, fontsize=6)
ax0.set_title('substrates', fontsize=20)
ax0.set_xlabel('growth condition', fontsize=16)
ax0.set_ylabel('')
clb1 = matplotlib.colorbar.make_axes(ax1)
sns.heatmap(ki_pivoted, ax=ax1, mask=ki_pivoted.isnull(),
cbar=True, vmin=-1, vmax=1, annot=True, cmap=settings.HEATMAP_COLORMAP,
cbar_ax=clb1[0], fmt='.2f')
ax1.set_xticklabels(list(ki_pivoted.columns), fontsize=12, rotation=90)
ax1.set_title('inhibitors', fontsize=20)
ax1.set_yticklabels(ki_pivoted.index,
rotation=0, fontsize=10)
ax1.set_xlabel('growth condition', fontsize=16)
ax1.set_ylabel('')
clb1[0].set_ylabel('elasticity', fontsize=16)
settings.savefig(fig, 'figS5')
km_pivoted.to_csv(os.path.join(settings.RESULT_DIR,
'km_elasticity_full.csv'))
ki_pivoted.to_csv(os.path.join(settings.RESULT_DIR,
'ki_elasticity_full.csv'))
def plot_fig2ab(self):
def venn3_sets(set_a, set_b, set_c, set_labels, ax):
# order of values for Venn diagram:
# (Abc, aBc, ABc, abC, AbC, aBC, ABC)
Abc = len(set_a.difference(set_b.union(set_c)))
aBc = len(set_b.difference(set_a.union(set_c)))
abC = len(set_c.difference(set_a.union(set_b)))
ABc = len(set_a.intersection(set_b).difference(set_c))
AbC = len(set_a.intersection(set_c).difference(set_b))
aBC = len(set_b.intersection(set_c).difference(set_a))
ABC = len(set_a.intersection(set_b).intersection(set_c))
venn3(subsets=(Abc, aBc, ABc, abC, AbC, aBC, ABC),
set_labels=set_labels, ax=ax)
print("found %d native interactions in %s" % \
(self.regulation.shape[0], ORGANISM))
ind_inh = self.regulation['Mode'] == '-'
ind_act = self.regulation['Mode'] == '+'
inh_met = set(self.regulation.loc[ind_inh, 'bigg.metabolite'])
act_met = set(self.regulation.loc[ind_act, 'bigg.metabolite'])
inh_ec = set(self.regulation.loc[ind_inh, 'EC_number'])
act_ec = set(self.regulation.loc[ind_act, 'EC_number'])
fig, axs = plt.subplots(1, 2, figsize=(7, 5))
venn3_sets(inh_met, act_met, self.native_mets, ax=axs[0],
set_labels=('inhibitors', 'activators',
'E. coli metabolites (%d total)' %
len(self.native_mets)))
venn3_sets(inh_ec, act_ec, self.native_ECs, ax=axs[1],
set_labels=('inhibited', 'activated',
'E. coli reactions (%d total)' %
len(self.native_ECs)))
axs[0].annotate('a', xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
axs[1].annotate('b', xy=(0.02, 0.98),
xycoords='axes fraction', ha='left', va='top',
size=20)
settings.savefig(fig, 'fig2ab')
res = {'inhibitors': list(inh_met), 'activators': list(act_met),
'all_metabolites': list(self.native_mets),
'inhibited': list(inh_ec), 'activated': list(act_ec),
'all_reactions': list(self.native_ECs)}
_fname = os.path.join(settings.RESULT_DIR, 'venn_groups.json')
with open(_fname, 'w') as fp:
json.dump(res, fp, indent=4)
def plot_fig2cd(self,highconfidence):
"""
Draw 2D histograms of the number of activating and
inhibiting reactions
grouped by metabolite and grouped by reaction
Highconfidence indicates that we should only use
edges which have two or more literature references
"""
highc_string = '_highconfidence' if highconfidence else ''
def plot_jointhist(data, xlabel, ylabel, xmax, ymax, highconfidence):
"""
plot the histogram as a scatter plot with marginal histograms,
and ensure empty bins are easily distinguishable from ones
that have at least 1 hit.
generally use xcrit = 12,ycrit = 5 unless using only high
confidence interactions, then xcrit = 6, ycrit = 2
"""
x = data[xlabel]
y = data[ylabel]
if highconfidence:
xcrit = 6
ycrit = 2
else:
xcrit = 12
ycrit = 5
# First, plot the scatter plot
g = sns.JointGrid(x=x, y=y, size=4,
xlim=(-1, xmax+1), ylim=(-1, ymax+1))
g = g.plot_joint(plt.scatter, alpha=0.2)
plt.gcf()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# annotate only unique points
ann_df = data.drop_duplicates((xlabel, ylabel), keep=False)
ann_df = ann_df[(ann_df[xlabel] > xcrit) | (ann_df[ylabel] > ycrit)]
for i, row in ann_df.iterrows():
plt.annotate(i, xy=(row[xlabel], row[ylabel]),
xytext=(row[xlabel]+1, row[ylabel]+1),
ha='center', va='top', size=10,
textcoords='data')
# Next, plot the marginal histograms
g = g.plot_marginals(sns.distplot, kde=False)
return plt.gcf()
# if using high confidence edges, then find those edges
cols = ('bigg.reaction', 'bigg.metabolite')
if highconfidence:
reg = self.regulation
reg = reg[reg['Source'] == 'BRENDA']
reg['RefList'] = [item.split(',') if pd.notnull(item) else []
for item in reg['Literature']]  # assumed continuation of the truncated source line
# generates accuracy and predictions using ML classifiers
# Requires generation of simulated data from "sim_speeches.r"
import os
import cPickle as pickle
import sys
import logging
import pandas as pd
import numpy as np
import re
import string
import itertools
import os.path
import time
import scipy
from scipy.sparse import csr_matrix
from sklearn import preprocessing
from sklearn.preprocessing import maxabs_scale
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import StratifiedKFold
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
def fit_pred_offline_classifiers(X_train, y_train, X_test, y_test, X):
classifiers_balanced = {
'SGD': SGDClassifier(class_weight='balanced', n_jobs=10),
'Perceptron': Perceptron(class_weight='balanced', n_jobs=10),
'Passive-Aggressive': PassiveAggressiveClassifier(class_weight='balanced', n_jobs=10),
}
classifiers_bal_predprob = {"SAG": LogisticRegression(solver='sag', n_jobs=10, tol=1e-1, C=1.e4 / 50000 ),} # , C=1.e4 / 50000
cls_stats = {}
preds = {}
for cls_name in classifiers_bal_predprob:
stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 't0': time.time(), 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
for cls_name in classifiers_balanced:
stats = {'n_train': 0, 'n_train_pos': 0, 'accuracy': 0.0, 't0': time.time(), 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
tick = time.time()
for cls_name, cls in classifiers_bal_predprob.items():
print("fitting %s" % cls_name)
#logging.info("fitting %s" % cls_name)
cls.fit(X_train, y_train)#, classes=all_classes)
preds[cls_name] = cls.predict_proba(X)
# stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
tick = time.time()
for cls_name, cls in classifiers_balanced.items():
#logging.info("fitting %s" % cls_name)
cls = LogisticRegression(solver='sag', n_jobs=10, tol=1e-1, C=1.e4 / X_train.shape[0]) # put this here to get C correct; NOTE: this re-binds 'cls', so every entry of classifiers_balanced is actually fit as a LogisticRegression
cls.fit(X_train, y_train)#, classes=all_classes)
preds[cls_name] = cls.predict(X)
# stats
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
return(cls_stats, preds)
#---------------------------------------
#---------------------------------------
def run_estimates(X, y):
# tick = time.time()
skf = StratifiedKFold(y, n_folds=10, shuffle=True)#, random_state=1234)
cls_stats = {}
preds= {}
foldid = 0
for train_index, test_index in skf:
#logging.info("fold: %d" % foldid)
#logging.info("TRAIN: %s" train_index)#, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
cls_stats[foldid], preds[foldid] = fit_pred_offline_classifiers(X_train, y_train, X_test, y_test, X)
foldid += 1
#for _, stats in sorted(cls_stats.items()):
# accuracy, n_examples = zip(*stats['accuracy_history'])
# fit_time = time.time() - tick
return(cls_stats, preds)
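# Hypothetical smoke test for run_estimates (added sketch; names are placeholders):
# X_demo = csr_matrix(np.random.rand(200, 50))
# y_demo = np.random.randint(0, 2, size=200)
# cls_stats_demo, preds_demo = run_estimates(X_demo, y_demo)
# cls_stats_demo[0]['SAG']['accuracy'] # held-out accuracy of the fold-0 SAG model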
#---------------------------------------
#---------------------------------------
def avergage_per_classifier(cls_stats, classifier_names):
accuracies = {}
median = {}
vs = {}
for classif in classifier_names:
accs = []
for fold, stats in cls_stats.items():
relevant = stats[classif]
accs.append(relevant['accuracy'])
accuracies[classif] = np.mean(accs)
vs[classif] = np.var(accs)
median[classif] = np.median(accs)
return(accuracies, median, vs)
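# Worked example (added): for two folds where SAG scores 0.8 and 0.9,
# avergage_per_classifier({0: {'SAG': {'accuracy': 0.8}},
#                          1: {'SAG': {'accuracy': 0.9}}}, ['SAG'])
# returns mean 0.85, median 0.85 and population variance 0.0025 for 'SAG',
# i.e. the tuple (accuracies, medians, variances) keyed by classifier name.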
#---------------------------------------
#
#---------------------------------------
def stats_from_estimates(yearly_stats, randomize, run_id):
""" """
classifier_names = ['SAG', 'SGD', 'Perceptron','Passive-Aggressive'] #classifiers.keys()
rows = []
for indx, yr in sess_indx.items()[:79]:
#logging.info(str(yr))
try:
curr = yearly_stats[indx]
mns, meds, vs = avergage_per_classifier(curr, classifier_names )
rows.append([indx, yr, mns['SAG'], mns['SGD'], mns['Perceptron'], mns['Passive-Aggressive'],
meds['SAG'], meds['SGD'], meds['Perceptron'], meds['Passive-Aggressive'],
vs['SAG'], vs['SGD'], vs['Perceptron'], vs['Passive-Aggressive'] ])
except:
logging.error("Error getting stats for: ", str(yr))
res = pd.DataFrame(data=rows, columns = ['index', 'yrmth',
'mn_sag','mn_sgd','mn_pcpt','mn_passAgr',
'md_sag','md_sgd','md_pcpt','md_passAgr',
'var_sag','var_sgd','var_pcpt','var_passAgr' ])
res.to_csv(results_dir + '/acc_allmembers_rand' + str(randomize)+ "_run" + str(run_id) +".csv", index=False)
#---------------------------------------
#
#---------------------------------------
classifier_names = ['SAG', 'SGD', 'Perceptron','Passive-Aggressive']
def calc_xfold_acc(df, rows):
X = np.array(df.iloc[:,1:])
y = np.array(df.Y)
stats, preds = run_estimates(X, y)
mns, meds, vs = avergage_per_classifier(stats, classifier_names )
rows.append([strength, size, separation, noisefrac, mns['SAG'], mns['SGD'], mns['Perceptron'], mns['Passive-Aggressive'],
meds['SAG'], meds['SGD'], meds['Perceptron'], meds['Passive-Aggressive'],
vs['SAG'], vs['SGD'], vs['Perceptron'], vs['Passive-Aggressive'] ])
return(rows)
#---------------------------------------
#
#---------------------------------------
def calc_xfold_acc_predprob(df, rows):
""" and predict accuracy."""
X = np.array(df.iloc[:,1:])
y = np.array(df.Y)
stats, preds = run_estimates(X, y)
folds_preds = []
for fold in range(0,10):
pred = preds[fold]
probs = pred['SAG'][:,1]
folds_preds.append(probs)
folds_preds = np.array(folds_preds)
folds_preds = folds_preds.mean(axis=0)
mns, meds, vs = avergage_per_classifier(stats, classifier_names )
rows.append([strength, size, separation, noisefrac, mns['SAG'], mns['SGD'], mns['Perceptron'], mns['Passive-Aggressive'],
meds['SAG'], meds['SGD'], meds['Perceptron'], meds['Passive-Aggressive'],
vs['SAG'], vs['SGD'], vs['Perceptron'], vs['Passive-Aggressive'], folds_preds])
return(rows)
#---------------------------------------
#
#---------------------------------------
def main():
curr_dir = os.getcwd()
strength = 300
size= 100
# Accuracy, for Figure 1:
rows = []
for separation in np.arange(0.3, 0.5, .01):
print("separation: %.2f" % separation)
for noisefrac in np.arange(0,.95, .05):
print("noise frac: %.2f" % noisefrac)
filein = "/nas/tz/uk/sim/sims/sim_" + str(strength) + "_" + str(size) + "_" + str(int(separation*100)) + "_" + str(int(noisefrac*100)) + "_" + ".csv"
print(filein)
df = pd.read_csv(filein, index_col=0)
rows = calc_xfold_acc(df, rows)
res2 = pd.DataFrame(data=rows, columns = ['strength', 'size', 'separation','noisefrac',
'mn_sag','mn_sgd','mn_pcpt','mn_passAgr',
'md_sag','md_sgd','md_pcpt','md_passAgr',
'var_sag','var_sgd','var_pcpt','var_passAgr' ])
res2.to_csv(curr_dir + "acc_sims.csv")
# now, predictions, with just noisefrac variation (for Figure 2)
separation = 0.4
rows = []
for noisefrac in np.arange(0,1, .001):
print("noise frac: %.2f" % noisefrac)
filein = "/nas/tz/uk/sim/sims/sim_1k__" + str(strength) + "_" + str(size) + "_" + str(int(separation*100)) + "_" + str(noisefrac*100) + "_" + ".csv"
filein2 = re.sub(r'.0_.csv', r'_.csv', filein)
print(filein2)
df = pd.read_csv(filein2, index_col=0)
rows = calc_xfold_acc_predprob(df, rows)
resP = pd.DataFrame(data=rows, columns = ['strength', 'size', 'separation','noisefrac',
'mn_sag','mn_sgd','mn_pcpt','mn_passAgr',
'md_sag','md_sgd','md_pcpt','md_passAgr',
'var_sag','var_sgd','var_pcpt','var_passAgr', 'preds' ])
#resP.to_csv() - not used...
preds = resP['preds']
p2 = []
for i in range(1000):
p2.append(preds[i])
p2 = np.array(p2)
df = pd.DataFrame(p2)
"""
Inspired by https://github.com/wassname/rl-portfolio-management/blob/master/src/environments/portfolio.py
and https://github.com/vermouth1992/drl-portfolio-management/blob/master/src/environment/portfolio.py, which are
based on [Jiang 2017](https://arxiv.org/abs/1706.10059)
https://github.com/ZhengyaoJiang/PGPortfolio
"""
'''
@Author: <NAME>
'''
import numpy as np
import pandas as pd
import matplotlib as plt
import datetime
import gym
import gym.spaces
from environment.data import DataProcessor, date_to_index, index_to_date
from environment.portfolio import Portfolio
eps = 1e-8
def sharpe(returns, freq=30, rfr=0):
# Calculate the Sharpe ratio of a return series (scaled by sqrt(freq))
return (np.sqrt(freq) * np.mean(returns - rfr + eps)) / np.std(returns - rfr + eps)
def max_drawdown(return_list):
# Calculate the maximum drawdown of a cumulative-return series
i = np.argmax((np.maximum.accumulate(return_list) - return_list) / np.maximum.accumulate(return_list))  # trough index (end of the drawdown)
if i == 0:
return 0
j = np.argmax(return_list[:i])
return (return_list[j] - return_list[i]) / (return_list[j])
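# Worked example (added): for the cumulative-value series [1.0, 1.2, 0.9, 1.1]
# the running peak is 1.2 and the subsequent trough is 0.9, so
# max_drawdown([1.0, 1.2, 0.9, 1.1]) == (1.2 - 0.9) / 1.2 == 0.25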
# A class for the portfolio environment
class envs(gym.Env):
def __init__(self,
product_list,
market_feature,
feature_num,
steps,
window_length,
mode,
start_index=0,
start_date=None):
self.window_length = window_length
self.start_index = start_index
self.mode = mode
self.dataprocessor = DataProcessor(
product_list=product_list,
market_feature=market_feature,
feature_num=feature_num,
steps=steps,
window_length=window_length,
mode=mode,
start_index=start_index,
start_date=start_date)
if mode == "Train":
trading_cost = 0.0000
elif mode == "Test":
trading_cost = 0.0025
self.portfolio = Portfolio(steps=steps,trading_cost=trading_cost, mode=mode)
def step(self, action):
# Normalize the action
action = np.clip(action, 0, 1)
weights = action
weights /= (weights.sum() + eps)
weights[0] += np.clip(1 - weights.sum(), 0, 1)
observation, done1, next_obs, = self.dataprocessor._step()
# Prepend the risk-free (cash) asset, whose price series is constant at 1
c_observation = np.ones((1, self.window_length, observation.shape[2]))
observation = np.concatenate((c_observation, observation), axis=0)
c_next_obs = np.ones((1, 1, next_obs.shape[2]))
next_obs = np.concatenate((c_next_obs, next_obs), axis=0)
# Obtain the price vector
close_price_vector = observation[:, -1, 3]
open_price_vector = observation[:, -1, 0]
reset = 0
y1 = observation[:, 0, 3] / observation[:, -1, 3]
reward, info, done2 = self.portfolio._step(weights, y1, reset)
info['date'] = index_to_date(self.start_index + self.dataprocessor.idx + self.dataprocessor.step)
self.infos.append(info)
return observation, reward, done1 or done2, info
def reset(self):
self.infos = []
self.portfolio.reset()
observation, next_obs = self.dataprocessor.reset()
c_observation = np.ones((1, self.window_length, observation.shape[2]))
observation = np.concatenate((c_observation, observation), axis=0)
c_next_obs = np.ones((1, 1, next_obs.shape[2]))
next_obs = np.concatenate((c_next_obs, next_obs), axis=0)
info = {}
return observation, info
def render(self):
df_info = pd.DataFrame(self.infos)
df_info['date'] = pd.to_datetime(df_info['date'], format='%Y-%m-%d')
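# Hypothetical rollout sketch (added; argument values are placeholders):
# env = envs(product_list=products, market_feature=features, feature_num=len(features),
#            steps=500, window_length=50, mode="Train")
# obs, info = env.reset()
# action = np.ones(obs.shape[0]) / obs.shape[0] # equal weight incl. the cash asset
# obs, reward, done, info = env.step(action)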
"""
Data structure for 1-dimensional cross-sectional and time series data
"""
# pylint: disable=E1101,E1103
# pylint: disable=W0703,W0622,W0613,W0201
import itertools
import operator
import sys
import warnings
from numpy import nan, ndarray
import numpy as np
from pandas.core.common import (isnull, notnull, _ensure_index,
_is_bool_indexer, _default_index)
from pandas.core.daterange import DateRange
from pandas.core.generic import PandasObject
from pandas.core.index import Index, MultiIndex
from pandas.core.indexing import _SeriesIndexer, _maybe_droplevels
import pandas.core.datetools as datetools
import pandas._tseries as _tseries
__all__ = ['Series', 'TimeSeries']
def _numpy_lt_151():
return np.__version__ < '1.5.1'
#-------------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
from pandas.core.frame import DataFrame
if isinstance(other, Series):
if self.index.equals(other.index):
return Series(op(self.values, other.values), index=self.index)
new_index = self.index + other.index
this_reindexed = self.reindex(new_index)
other_reindexed = other.reindex(new_index)
arr = op(this_reindexed.values, other_reindexed.values)
return Series(arr, index=new_index)
elif isinstance(other, DataFrame):
return NotImplemented
else:
# scalars
return Series(op(self.values, other), index=self.index)
return wrapper
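# Illustrative behaviour of the wrapped operators (added note): adding two
# Series with partially overlapping indexes aligns them on the union of the
# labels and yields NaN where either side is missing, e.g.
# s1 = Series([1., 2.], index=['a', 'b']); s2 = Series([10., 20.], index=['b', 'c'])
# (s1 + s2) -> a: NaN, b: 12.0, c: NaN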
def _flex_method(op, name):
def f(self, other, fill_value=None):
return self._binop(other, op, fill_value=fill_value)
f.__doc__ = """
Binary operator %s with support to substitute a fill_value for missing data
in one of the inputs
Parameters
----------
other: Series or scalar value
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
Returns
-------
result : Series
""" % name
f.__name__ = name
return f
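# Illustrative fill_value semantics (added note): with s1 and s2 as in the
# example above, s1.add(s2, fill_value=0) treats a value missing on only one
# side as 0, giving a: 1.0, b: 12.0, c: 20.0; a label missing from both inputs
# would still be NaN.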
#-------------------------------------------------------------------------------
# Series class
class Series(np.ndarray, PandasObject):
"""
Generic indexed (labeled) vector, including time series
Contains values in a numpy-ndarray with an optional bound index
(also an array of dates, strings, or whatever you want the 'row
names' of your series to be)
Rows can be retrieved by index value (date, string, etc.) or
relative position in the underlying array.
Operations between Series (+, -, /, *, **) align values based on
their associated index values-- they need not be the same length.
Parameters
----------
data : array-like, dict, or scalar value
Contains data stored in Series
index : array-like
Index object (or other iterable of same length as data)
Must be input if first argument is not a dict. If both a dict
and index sequence are used, the index will override the keys
found in the dict.
dtype : numpy.dtype or None
If None, dtype will be inferred
copy : boolean, default False
Copy input data
Notes
-----
If you combine two series, all values for an index position must
be present or the value for that index position will be nan. The
new index is the sorted union of the two Series indices.
Data is *not* copied from input arrays by default
"""
_AXIS_NUMBERS = {
'index' : 0
}
_AXIS_NAMES = dict((v, k) for k, v in _AXIS_NUMBERS.iteritems())
def __new__(cls, data, index=None, dtype=None, name=None, copy=False):
if isinstance(data, Series):
if index is None:
index = data.index
elif isinstance(data, dict):
if index is None:
index = Index(sorted(data.keys()))
data = [data[idx] for idx in index]
# Create array, do *not* copy data by default, infer type
try:
subarr = np.array(data, dtype=dtype, copy=copy)
except ValueError:
if dtype:
raise
subarr = np.array(data, dtype=object)
if subarr.ndim == 0:
if isinstance(data, list): # pragma: no cover
subarr = np.array(data, dtype=object)
elif index is not None:
value = data
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
# so this is kind of bad. Alternately we could use np.repeat
# instead of np.empty (but then you still don't want things
# coming out as np.str_!
if isinstance(value, basestring) and dtype is None:
dtype = np.object_
if dtype is None:
subarr = np.empty(len(index), dtype=type(value))
else:
subarr = np.empty(len(index), dtype=dtype)
subarr.fill(value)
else:
return subarr.item()
elif subarr.ndim > 1:
raise Exception('Data must be 1-dimensional')
if index is None:
index = _default_index(len(subarr))
# This is to prevent mixed-type Series getting all casted to
# NumPy string type, e.g. NaN --> '-1#IND'.
if issubclass(subarr.dtype.type, basestring):
subarr = np.array(data, dtype=object, copy=copy)
# Change the class of the array to be the subclass type.
subarr = subarr.view(cls)
subarr.index = index
subarr.name = name
if subarr.index.is_all_dates():
subarr = subarr.view(TimeSeries)
return subarr
def __init__(self, *args, **kwargs):
pass
def __hash__(self):
raise TypeError('unhashable type')
_index = None
def _get_index(self):
return self._index
def _set_index(self, index):
indexTypes = ndarray, Index, list, tuple
if not isinstance(index, indexTypes):
raise TypeError("Expected index to be in %s; was %s."
% (indexTypes, type(index)))
if len(self) != len(index):
raise AssertionError('Lengths of index and values did not match!')
self._index = _ensure_index(index)
index = property(fget=_get_index, fset=_set_index)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self._index = getattr(obj, '_index', None)
def toDict(self):
return dict(self.iteritems())
def to_sparse(self, kind='block', fill_value=None):
"""
Convert Series to SparseSeries
Parameters
----------
kind : {'block', 'integer'}
fill_value : float, defaults to NaN (missing)
Returns
-------
sp : SparseSeries
"""
from pandas.core.sparse import SparseSeries
return SparseSeries(self, kind=kind, fill_value=fill_value)
def __contains__(self, key):
return key in self.index
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(ndarray.__reduce__(self))
subclass_state = (self.index, )
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
ndarray.__setstate__(self, nd_state)
index, = own_state
self.index = index
def __getitem__(self, key):
"""
Returns item(s) for requested index/sequence, overrides default behavior
for series[key].
Logic is as follows:
- If key is in the index, return the value corresponding
to that index
- Otherwise, use key (presumably one integer or a sequence
of integers) to obtain values from the series. In the case
of a sequence, a 'slice' of the series (with corresponding dates)
will be returned, otherwise a single value.
"""
try:
if isinstance(self.index, MultiIndex):
return self._multilevel_index(key)
else:
values = self.values
try:
return values[self.index.get_loc(key)]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise
except TypeError:
pass
def _index_with(indexer):
return Series(self.values[indexer],
index=self.index[indexer])
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if _is_bool_indexer(key):
self._check_bool_indexer(key)
key = np.asarray(key, dtype=bool)
return _index_with(key)
# TODO: [slice(0, 5, None)] will break if you convert to ndarray,
# e.g. as requested by np.median
try:
return _index_with(key)
except Exception:
key = np.asarray(key)
return _index_with(key)
def _multilevel_index(self, key):
values = self.values
try:
loc = self.index.get_loc(key)
if isinstance(loc, slice):
# TODO: what if a level contains tuples??
new_index = self.index[loc]
new_index = _maybe_droplevels(new_index, key)
return Series(values[loc], index=new_index)
else:
return values[loc]
except KeyError:
if isinstance(key, (int, np.integer)):
return values[key]
raise Exception('Requested index not in this series!')
def get(self, key, default=None):
"""
Returns value occupying requested index, default to specified
missing value if not present
Parameters
----------
key : object
Index value looking for
default : object, optional
Value to return if key not in index
Returns
-------
y : scalar
"""
if key in self.index:
return self._get_val_at(self.index.get_loc(key))
else:
return default
# help out SparseSeries
_get_val_at = ndarray.__getitem__
def __getslice__(self, i, j):
"""
Returns a slice of the Series.
Note that the underlying values are COPIES.
The reason that the getslice returns copies is that otherwise you
will have a reference to the original series which could be
inadvertently changed
"""
return Series(self.values[i:j].copy(), index=self.index[i:j])
def __setitem__(self, key, value):
values = self.values
try:
loc = self.index.get_loc(key)
values[loc] = value
return
except KeyError:
if isinstance(key, (int, np.integer)):
values[key] = value
return
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item
pass
self._check_bool_indexer(key)
# special handling of boolean data with NAs stored in object
# arrays. Sort of an elaborate hack since we can't represent boolean
# NA. Hmm
if isinstance(key, np.ndarray) and key.dtype == np.object_:
mask = isnull(key)
if mask.any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
if set([True, False]).issubset(set(key)):
key = np.asarray(key, dtype=bool)
values[key] = value
return
values[key] = value
def _check_bool_indexer(self, key):
# boolean indexing, need to check that the data are aligned, otherwise
# disallowed
if isinstance(key, Series) and key.dtype == np.bool_:
if not key.index.equals(self.index):
raise Exception('can only boolean index with like-indexed '
'Series or raw ndarrays')
def __setslice__(self, i, j, value):
"""Set slice equal to given value(s)"""
ndarray.__setslice__(self, i, j, value)
def __repr__(self):
"""Clean string representation of a Series"""
if len(self.index) > 500:
return self._make_repr(50)
elif len(self.index) > 0:
return _seriesRepr(self.index, self.values)
else:
return '%s' % ndarray.__repr__(self)
def _make_repr(self, max_vals=50):
vals = self.values
index = self.index
num = max_vals // 2
head = _seriesRepr(index[:num], vals[:num])
tail = _seriesRepr(index[-(max_vals - num):], vals[-(max_vals - num):])
return head + '\n...\n' + tail + '\nlength: %d' % len(vals)
def toString(self, buffer=sys.stdout, nanRep='NaN'):
print >> buffer, _seriesRepr(self.index, self.values,
nanRep=nanRep)
def __str__(self):
return repr(self)
def __iter__(self):
return iter(self.values)
def copy(self):
return Series(self.values.copy(), index=self.index)
#-------------------------------------------------------------------------------
# Arithmetic operators
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__mul__ = _arith_method(operator.mul, '__mul__')
__div__ = _arith_method(operator.div, '__div__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__pow__ = _arith_method(operator.pow, '__pow__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__radd__ = _arith_method(operator.add, '__add__')
__rmul__ = _arith_method(operator.mul, '__mul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__sub__')
__rdiv__ = _arith_method(lambda x, y: y / x, '__div__')
__rtruediv__ = _arith_method(lambda x, y: y / x, '__truediv__')
__rpow__ = _arith_method(lambda x, y: y ** x, '__pow__')
# Inplace operators
__iadd__ = __add__
__isub__ = __sub__
__imul__ = __mul__
__idiv__ = __div__
__ipow__ = __pow__
#-------------------------------------------------------------------------------
# Statistics, overridden ndarray methods
# TODO: integrate bottleneck
def count(self):
"""
Return number of observations of Series.
Returns
-------
nobs : int
"""
return notnull(self.values).sum()
def sum(self, axis=None, dtype=None, out=None):
"""
Sum of non-null values
"""
return self._ndarray_statistic('sum')
def mean(self, axis=None, dtype=None, out=None):
"""
Mean of non-null values
"""
return self._ndarray_statistic('mean')
def _ndarray_statistic(self, funcname):
arr = self.values
retVal = getattr(arr, funcname)()
if isnull(retVal):
arr = remove_na(arr)
if len(arr) == 0:
return np.nan
retVal = getattr(arr, funcname)()
return retVal
def quantile(self, q=0.5):
"""
Return value at the given quantile
Parameters
----------
q : quantile
0 <= q <= 1
Returns
-------
q : float
"""
from scipy.stats import scoreatpercentile
return scoreatpercentile(self.valid().values, q * 100)
def describe(self):
"""
        Generate summary statistics of the Series, excluding NaN values
        Returns
        -------
        Series
"""
names = ['count', 'mean', 'std', 'min',
'10%', '50%', '90%', 'max']
data = [self.count(), self.mean(), self.std(), self.min(),
self.quantile(.1), self.median(), self.quantile(.9),
self.max()]
return Series(data, index=names)
def min(self, axis=None, out=None):
"""
Minimum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = np.inf
return arr.min()
def max(self, axis=None, out=None):
"""
Maximum of non-null values
"""
arr = self.values.copy()
if not issubclass(arr.dtype.type, np.int_):
arr[isnull(arr)] = -np.inf
return arr.max()
def std(self, axis=None, dtype=None, out=None, ddof=1):
"""
Unbiased standard deviation of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return nan
return ndarray.std(nona, axis, dtype, out, ddof)
def var(self, axis=None, dtype=None, out=None, ddof=1):
"""
Unbiased variance of non-null values
"""
nona = remove_na(self.values)
if len(nona) < 2:
return nan
return ndarray.var(nona, axis, dtype, out, ddof)
def skew(self):
"""
Unbiased skewness of the non-null values
Returns
-------
skew : float
"""
y = np.array(self.values)
        mask = notnull(y)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 15:50:11 2018
@author: sitaram
"""
import pandas as pd
import os,sys
import glob
#location of all background network files and number of files
bg_files=sys.argv[1]
number=int(sys.argv[2])
observed_file=sys.argv[3]
obs_file=pd.read_csv(observed_file)
#print('length of original frequency file ',len(obs_file['frequency'].dropna()))
obs_file.columns=['journal_pairs','obs_frequency']
pattern='*freq*'
file_names=glob.glob(bg_files+pattern)
file_names.sort()
#file_names=os.listdir(bg_files+pattern)
#file_names.sort()
# Perform a left join with each background file so only observed journal pairs keep a frequency
for i in range(0,number):
    data = pd.read_csv(file_names[i])
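    # Hedged sketch of the remaining loop body -- the original script is truncated here.
    # Assumptions: each background file has the same 'journal_pairs'/'frequency' layout as
    # the observed file, and the per-file column suffix is illustrative only.
    data.columns = ['journal_pairs', 'bg_frequency_' + str(i)]
    obs_file = obs_file.merge(data, on='journal_pairs', how='left')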
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import plotting # pylint: disable=wrong-import-position, wrong-import-order
from unittest.mock import patch
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
import matplotlib
from nevergrad.common import testing
from . import utils
matplotlib.use('Agg')
def test_get_winners_df() -> None:
data = [["alg0", 46424, .4],
["alg1", 4324546, .1],
["alg1", 424546, .5],
["alg2", 1424546, .3]]
    df = pd.DataFrame(columns=["optimizer_name", "blublu", "loss"], data=data)
import itertools
from typing import Union
import napari
import numpy as np
import pandas as pd
from Geometry3D import *
from scipy import ndimage
from sklearn.neighbors import NearestNeighbors
def annotate_filaments(annotation_layer, output_fn, maxpoints: int = 10):
"""
Parameters
----------
annotation_layer : napari layer
napari shapes layer to add annotations
output_fn : str
csv file to save filament coordinates
maxpoints : int, optional
Maximum points in the annotated filament.
Used for regularization.
Default: 10
    Returns
    -------
    None
        Annotations are saved to ``output_fn`` as they are added.
    """
near_points = []
far_points = []
polygons = []
@annotation_layer.mouse_drag_callbacks.append
def draw_polygons(layer, event):
"""
Draw two polygons in different projections and calculate their intersection
"""
yield
while event.type == 'mouse_move':
if 'Control' in event.modifiers: # draw a polygon if "Control" is pressed
# get the near a far points of the mouse position
near_point, far_point = layer.get_ray_intersections(
event.position,
event.view_direction,
event.dims_displayed
)
# append to the array of near and far points
if (near_point is not None) and (far_point is not None):
near_points.append(near_point)
far_points.append(far_point)
# draw a polygon from the array of near and far points if there are > 3 of them
if len(near_points) > 3:
layer = draw_polygon(layer, near_points, far_points)
else: # normal rotation when "Control" is not pressed
# add a polygon if there are some point saved
if len(near_points) > 0:
polygons.append([near_points.copy(), far_points.copy()])
# clear the points array
near_points.clear()
far_points.clear()
# if there are 2 or more polygons, calculate their intersection
if len(polygons) >= 2:
npt1 = polygons[0][0]
npt2 = polygons[1][0]
fpt1 = polygons[0][1]
fpt2 = polygons[1][1]
mt = compute_polygon_intersection(npt1, npt2, fpt1, fpt2, maxpoints=maxpoints)
# add the calculated filament
mt = sort_points(mt) # make sure the filament coordinates are sorted
# remove the 2 polygons from the shapes layer
layer.selected_data = set(range(layer.nshapes - 2, layer.nshapes))
layer.remove_selected()
# add the calculated filament
layer.add(mt, shape_type='path', edge_color='green', edge_width=1)
# clear the polygons array
polygons[0] = None
polygons[1] = None
polygons.clear()
# save current annotations to a csv file
annotation_to_pandas(layer.data[1:]).to_csv(output_fn, index=False)
yield
@annotation_layer.bind_key('d')
def delete_the_last_shape(layer):
"""
Remove the last added shape (polygon or filament)
"""
if layer.nshapes > 1:
msg = 'delete the last added shape'
layer.selected_data = set(range(layer.nshapes - 1, layer.nshapes))
if len(polygons) > 0:
_ = polygons.pop()
layer.remove_selected()
else:
msg = 'no shapes to delete'
layer.status = msg
print(msg)
annotation_to_pandas(layer.data[1:]).to_csv(output_fn, index=False)
def create_random_lines(size: int, n: int = 10, sigma: float = 0.8) -> np.ndarray:
"""
Generate an image (cube) with random lines.
Parameters
----------
size : int
Size of the cube side in pixels
n : int, optional
Number of random lines.
Default: 10
sigma : float, optional
Size of the Gaussian filter to smooth the line image.
Default: 0.8
Returns
-------
np.ndarray:
Image cube with random lines
"""
img = np.zeros([size, size, size])
for i in range(n):
start, end = np.random.randint(0, size, (2, 3))
ind = np.int_(np.linspace(start, end, 100, endpoint=False))
img[tuple(ind.transpose())] = 255
img = ndimage.gaussian_filter(img, sigma)
return img
def add_annotation_layer(viewer: napari.Viewer):
"""
Add an annotation layer to napari viewer.
Parameters
----------
viewer : napari.Viewer
napari viewer
Returns
-------
napari shapes layer with a bounding box shape
"""
shape = viewer.layers[0].data.shape
# add a bounding box to set the coordinates range
bbox = list(itertools.product(*[np.arange(2)
for i in range(len(shape[-3:]))]))
if len(shape) > 3:
bbox = [(0,) + b for b in bbox]
bbox = bbox * np.array(shape)
layer = viewer.add_shapes(bbox,
name='annotations',
shape_type='path',
edge_width=0)
return layer
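# A minimal usage sketch (assumed workflow; filenames and sizes are illustrative),
# kept as comments so nothing runs on import:
#
#     viewer = napari.Viewer(ndisplay=3)
#     viewer.add_image(create_random_lines(size=100, n=5))
#     layer = add_annotation_layer(viewer)
#     annotate_filaments(layer, output_fn='filaments.csv')
#     napari.run()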
def draw_polygon(layer, near_points: list, far_points: list, color: str = 'red'):
"""
Draw a polygon between provided near and far points.
Parameters
----------
layer : napari shapes layer
napari shapes layer with annotations
near_points : list
List of polygon coordinates nearer to the viewer.
far_points : list
List of polygon coordinates further from the viewer.
color : str, optional
Color of the polygon
Returns
-------
Updated shapes layer
"""
far_points_reverse = far_points.copy()
far_points_reverse.reverse()
polygon = np.array(near_points + far_points_reverse)
if np.array((layer.data[-1][0] == polygon[0])).all():
layer.selected_data = set(range(layer.nshapes - 1, layer.nshapes))
layer.remove_selected()
layer.add(
polygon,
shape_type='polygon',
edge_width=1,
edge_color=color
)
return layer
def tetragon_intersection(p1: list, p2: list):
"""
Calculate intersection of two tetragons in 3D
Parameters
----------
p1, p2 : list
List of tetragon coordinates
Returns
-------
list or None:
List of (two) coordinate of the intersection line or None if no intersection exists.
"""
t = []
p1 = np.array(p1)
p2 = np.array(p2)
if p1.shape[1] > 3:
t = list(p1[0][:-3])
p1 = p1[:, -3:]
p2 = p2[:, -3:]
p1 = list(set([Point(*coords) for coords in p1]))
p2 = list(set([Point(*coords) for coords in p2]))
if len(p1) > 2 and len(p2) > 2:
plane1 = ConvexPolygon(p1)
plane2 = ConvexPolygon(p2)
inter = intersection(plane1, plane2)
if inter is not None:
inter = [t + list(pt) for pt in inter]
return inter
else:
return None
def smooth_points(points, sig, maxpoints):
points = np.array(points)
points = ndimage.gaussian_filter(points, [sig, 0])
if maxpoints is not None:
ind = np.int_(np.linspace(0, len(points) - 1, maxpoints, endpoint=False))
points = np.array(points)[ind]
return points
def compute_polygon_intersection(npt1: np.ndarray, npt2: np.ndarray,
fpt1: np.ndarray, fpt2: np.ndarray,
sigma=1, maxpoints=None):
"""
Calculate intersection of two non-convex polygons represented by a list of near and far points.
Parameters
----------
npt1 : np.ndarray
Near points of the first polygon.
npt2 : np.ndarray
Near points of the second polygon.
fpt1 : np.ndarray
Far points of the first polygon.
fpt2 : np.ndarray
Far points of the second polygon.
sigma : float
Gaussian filter size in pixels to smooth the polygon points array before computing intersection.
Default: 1
maxpoints : int, optional
If provided, the number of points will be reduced to this number before computing intersection.
Default: None
Returns
-------
np.ndarray:
n x d array of the intersections coordinates,
where n is the number of points, d is the number of dimensions.
"""
mt = []
npt1 = smooth_points(npt1, sigma, maxpoints)
npt2 = smooth_points(npt2, sigma, maxpoints)
fpt1 = smooth_points(fpt1, sigma, maxpoints)
fpt2 = smooth_points(fpt2, sigma, maxpoints)
for i in range(len(npt1) - 1):
for j in range(len(npt2) - 1):
p1 = [npt1[i], npt1[i + 1], fpt1[i + 1], fpt1[i]]
p2 = [npt2[j], npt2[j + 1], fpt2[j + 1], fpt2[j]]
inter = tetragon_intersection(p1, p2)
if inter is not None:
if len(mt) == 0:
mt = inter
else:
mt = np.concatenate([mt, inter], axis=0)
mt = np.array(list(set([tuple(np.round(mt[i], 1)) for i in range(len(mt))])))
return mt
def __find_furthest_point_indices(points):
nbr = NearestNeighbors(n_neighbors=len(points)).fit(points)
distances, indices = nbr.kneighbors(points)
ind = np.where(distances == np.max(distances))
ind2 = [ind[0][0], indices[ind][0]]
return ind2
def sort_points(points: Union[list, np.ndarray]):
""" Sort the coordinates in the order on minimal distance path between them.
Parameters
----------
points : list
List of coordinates.
Returns
-------
list:
Sorted list of coordinates.
"""
sorted1 = []
sorted2 = []
while len(points) >= 2:
ind = __find_furthest_point_indices(points)
selected = points[ind]
if len(sorted1) == 0 or np.linalg.norm(selected[0] - sorted1[-1]) < np.linalg.norm(selected[0] - sorted2[-1]):
sorted1.append(selected[0])
sorted2.append(selected[1])
else:
sorted1.append(selected[1])
sorted2.append(selected[0])
points = points[~np.isin(np.arange(len(points)), ind)]
if len(points) > 0:
sorted1 = sorted1 + [points[0]]
sorted2.reverse()
sorted1 = sorted1 + sorted2
points = np.array(sorted1)
return points
def annotation_to_pandas(data: list) -> pd.DataFrame:
"""
Convert list of path to a pandas table with coordinates.
Parameters
----------
data : list
List of paths, each of which is a list of coordinates.
Returns
-------
pd.DataFrame:
pandas DataFrame with coordinates
"""
df = pd.DataFrame()
if len(data) > 0:
columns = ['t', 'z', 'y', 'x']
columns = columns[-data[0].shape[1]:]
for i, d in enumerate(data):
            cur_df = pd.DataFrame(d, columns=columns)
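            # Hedged completion of the truncated loop: tag each filament with its index and
            # collect everything into one table; the 'id' column name is an assumption.
            cur_df['id'] = i
            df = pd.concat([df, cur_df], ignore_index=True)
    return df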
import pandas as pd
import numpy as np
import random
import os
import datetime
import pickle
from IPython.display import display
from threading import Thread, Event
import ipywidgets as widgets
import time
# Helper function for bolding text string
def bold(string):
return '\033[1m' + string + '\033[0m'
# Helper function for ordinals
def ordinal(num):
lst = ['st', 'nd', 'rd'] + ['th'] * 17 + (
['st', 'nd', 'rd'] + ['th'] * 7) * 100
return str(num) + lst[num - 1]
class ReusableThread(Thread):
"""
Taken from:
https://www.codeproject.com/Tips/1271787/Python-Reusable-Thread-Class
This class provides code for a restartale / reusable thread
join() will only wait for one (target)functioncall to finish
finish() will finish the whole thread (after that, it's not restartable
anymore)
"""
def __init__(self, target, args):
self._startSignal = Event()
self._oneRunFinished = Event()
self._finishIndicator = False
self._callable = target
self._callableArgs = args
Thread.__init__(self)
def restart(self):
"""make sure to always call join() before restarting"""
self._startSignal.set()
def run(self):
""" This class will reprocess the object "processObject" forever.
Through the change of data inside processObject and start signals
we can reuse the thread's resources"""
self.restart()
while True:
# wait until we should process
self._startSignal.wait()
self._startSignal.clear()
if self._finishIndicator: # check, if we want to stop
self._oneRunFinished.set()
return
# call the threaded function
self._callable(*self._callableArgs)
# notify about the run's end
self._oneRunFinished.set()
def join(self, timeout=None):
""" This join will only wait for one single run (target functioncall)
to be finished"""
self._oneRunFinished.wait()
self._oneRunFinished.clear()
def finish(self):
self._finishIndicator = True
self.restart()
self.join()
class Draft:
def __init__(self, draft_format):
# Draft basics
assert draft_format in ('Salary Cap', 'Snake')
self.format = draft_format
self.curr_yr = str(datetime.datetime.now().year)
self.last_yr = str(int(self.curr_yr) - 1)
self.clock_num_sec = 10
self.clock = None
self.num_rounds = 16
if self.format == 'Snake':
self.input_str = """
You can either enter who you would like to draft or perform any of
            the following options by entering its corresponding number:
1) Look at who you already have drafted
2) View your current depth chart
3) See Mike Clay's best players available
4) See the last 10 players drafted
5) Look at the full draft history
"""
else:
self.input_str = """
You can either enter who you would like to nominate for the
            auction or perform any of the following options by entering its
corresponding number:
1) Look at individual draft histories
2) View all current depth charts
3) See expected salaries and point projections
4) See the last 10 players drafted
5) Look at the full draft history
6) Check how much a player is worth
"""
# File paths
self.keepers_pkl = '{}/keepers.pkl'.format(self.curr_yr)
self.draft_order_pkl = '{}/draft_order.pkl'.format(self.curr_yr)
self.last_yr_res = '{}/draft_results.xlsx'.format(self.last_yr)
self.raw_data = '{}/raw_data.xlsx'.format(self.curr_yr)
self.last_yr_indv = '{}/indv_draft_results.xlsx'.format(self.last_yr)
self.results = '{}/draft_results.xlsx'.format(self.curr_yr)
self.indv_results = '{}/indv_draft_results.xlsx'.format(self.curr_yr)
self.indv_depth_charts = '{}/indv_depth_charts.xlsx'.format(
self.curr_yr)
self.draft_params_pkl = '{}/draft_params.pkl'.format(self.curr_yr)
# Data structures
self.owners = pd.ExcelFile(self.last_yr_indv).sheet_names
self.last_yr_df = pd.read_excel(self.last_yr_res, index_col=2)
self.player_pool = pd.read_excel(self.raw_data, index_col=[0])
self.player_pool['Position'] = self.player_pool['Position'].str.strip()
if os.path.exists(self.keepers_pkl):
with open(self.keepers_pkl, 'rb') as f:
self.keepers = pickle.load(f)
else:
self.keepers = None
if os.path.exists(self.draft_order_pkl):
with open(self.draft_order_pkl, 'rb') as f:
self.draft_order = pickle.load(f)
else:
self.draft_order = None
self.draft_history = pd.DataFrame(
index=[], columns=self.player_pool.columns)
self.draft_history.index.name = 'Pick Overall'
self.draft_history_indv = {}
self.depth_charts = {}
for owner in self.owners:
self.draft_history_indv[owner] = pd.DataFrame(
index=[], columns=self.player_pool.columns)
self.draft_history_indv[owner].index.name = 'Pick Overall'
self.depth_charts[owner] = pd.read_excel(
'depth_chart_blank.xlsx', index_col=[0])
# Draft trackers
self.pick = 1
self.owner_idx = 0
self.round_num = 1
# Resume draft if previously started
if os.path.exists(self.draft_params_pkl):
with open(self.draft_params_pkl, 'rb') as f:
draft_params = pickle.load(f)
self.pick, self.owner_idx, self.round_num, self.player_pool, \
self.draft_history, self.draft_history_indv, \
self.depth_charts = draft_params
def _determine_keepers(self):
self.keepers = {}
for owner in self.owners:
input_str = '{}, who would you like to keep? '.format(bold(owner))
player = input(input_str)
while True:
if player == '0':
player = None
round_lost = None
break
if player in self.last_yr_df.index:
if self.last_yr_df.Round[player] > 1:
round_lost = self.last_yr_df.Round[player] - 1
break
else:
input_str = '\nYou drafted that player in the 1st ' \
'Round and cannot keep them. Who else ' \
'would you like to keep? '
player = input(input_str)
else:
if player in self.player_pool.index.tolist():
round_lost = 16
break
player = input('\nThat player is not in the player pool. '
'Please re-enter the player, making sure '
'you spelled his name correctly: ')
if player:
                if self.format == 'Snake':
print('{} will count as your {} pick.\n'.format(
bold(player), bold(ordinal(round_lost) + ' Round')))
self.keepers[owner] = {
'player': player, 'round': round_lost}
elif self.format == 'Salary Cap':
print('You have elected to keep {}.\n'.format(
bold(player)))
self.keepers[owner] = {'player': player, 'round': 0}
with open(self.keepers_pkl, 'wb') as f:
pickle.dump(self.keepers, f)
def _determine_draft_order(self):
random.shuffle(self.owners)
self.draft_order = [None] * len(self.owners)
for owner in self.owners:
input_str = "\n{}, you're up!\nWhich draft slot would you " \
"like? ".format(bold(owner))
slot = int(input(input_str))
while True:
if slot > 8 or slot < 1:
input_str = '\nSelect a number between 1 and 8: '
slot = int(input(input_str))
elif self.draft_order[slot - 1]:
input_str = '\nThat draft slot is already taken. ' \
'Pick a different one: '
slot = int(input(input_str))
else:
self.draft_order[slot - 1] = owner
break
with open(self.draft_order_pkl, 'wb') as f:
pickle.dump(self.draft_order, f)
@staticmethod
def _fill_depth_chart(owner, position, depth_charts):
spots = depth_charts[owner].index.tolist()
spot = ''
for spot in spots:
if position in spot and pd.isnull(
depth_charts[owner].at[spot, 'Player']):
return spot
elif (position == 'RB' or position == 'WR') and spot == \
'FLEX' and pd.isnull(depth_charts[owner].at[
spot, 'Player']):
return spot
elif 'Bench' in spot and pd.isnull(
depth_charts[owner].at[spot, 'Player']):
return spot
return spot[:-1] + str(int(spot[-1]) + 1)
def _update_data_structs(self, the_pick):
# Update depth chart / draft histories
print(the_pick)
self.draft_history.loc[self.pick] = the_pick
self.draft_history_indv[the_pick['Owner']].loc[
self.pick] = the_pick.drop('Owner')
index = self._fill_depth_chart(
the_pick['Owner'], the_pick['Position'], self.depth_charts)
self.depth_charts[the_pick['Owner']].loc[index] = the_pick.drop(
['Owner', 'Position'])
self.depth_charts[the_pick['Owner']] = self.depth_charts[
the_pick['Owner']].astype({'Bye': pd.Int64Dtype()})
# Sort draft histories
self.draft_history = self.draft_history.sort_values(
'Pick Overall')
for own in self.owners:
self.draft_history_indv[own] = \
self.draft_history_indv[own].sort_values(
'Pick Overall')
def _save_data(self):
        # Save Excel spreadsheets
        writer = pd.ExcelWriter(self.results)
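        # Hedged sketch of the rest of the method -- the original file is truncated here.
        # Assumption: the combined history goes to `self.results`, per-owner histories and
        # depth charts to their own workbooks; sheet names are illustrative.
        self.draft_history.to_excel(writer, sheet_name='Draft History')
        writer.save()
        writer = pd.ExcelWriter(self.indv_results)
        for owner in self.owners:
            self.draft_history_indv[owner].to_excel(writer, sheet_name=owner)
        writer.save()
        writer = pd.ExcelWriter(self.indv_depth_charts)
        for owner in self.owners:
            self.depth_charts[owner].to_excel(writer, sheet_name=owner)
        writer.save()
        # Persist draft trackers so an interrupted draft can resume (mirrors __init__)
        with open(self.draft_params_pkl, 'wb') as f:
            pickle.dump([self.pick, self.owner_idx, self.round_num, self.player_pool,
                         self.draft_history, self.draft_history_indv, self.depth_charts], f)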
# -*- coding: utf-8 -*-
# coding: utf-8
#Naive Bayes
import os
import io
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
#Function to read files (emails) from the local directory
def readFiles(path):
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
path = os.path.join(root, filename)
inBody = False
lines = []
f = io.open(path, 'r', encoding='latin1')
for line in f:
if inBody:
lines.append(line)
elif line == '\n':
inBody = True
f.close()
message = '\n'.join(lines)
yield path, message
def dataFrameFromDirectory(path, classification):
rows = []
index = []
for filename, message in readFiles(path):
rows.append({'message': message, 'class': classification})
index.append(filename)
return DataFrame(rows, index=index)
#An empty dataframe with 'message' and 'class' headers
data = DataFrame({'message': [], 'class': []})
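# Hedged sketch of the remaining steps -- the original script is truncated here.
# The 'emails/spam' and 'emails/ham' directories are assumptions; the point is the
# CountVectorizer + MultinomialNB training flow.
data = data.append(dataFrameFromDirectory('emails/spam', 'spam'))
data = data.append(dataFrameFromDirectory('emails/ham', 'ham'))

# Vectorize the messages into word counts and fit the Naive Bayes classifier
vectorizer = CountVectorizer()
counts = vectorizer.fit_transform(data['message'].values)
classifier = MultinomialNB()
classifier.fit(counts, data['class'].values)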
# _ __ _ _
# /\_/\ | '__| | | |
# [===] | | | |_| |
# \./ |_| \__,_|
#
# /***************//***************//***************/
# /* statspack.py *//* <NAME> *//* www.hakkeray.com */
# /***************//***************//***************/
# ________________________
# | hakkeray | Updated: |
# | v3.0.0 | 8.12.2020 |
# ------------------------
#
# * note: USZIPCODE pypi library is required to run zip_stats()
# Using pip in the notebook:
# !pip install -U uszipcode
# fsds tool required
# !pip install -U fsds_100719
# STANDARD libraries
import pandas as pd
from pandas import Series
import numpy as np
from numpy import log
# PLOTTING
import matplotlib as mpl
import matplotlib.pyplot as plt
import IPython.display as dp
plt.style.use('seaborn-bright')
mpl.style.use('seaborn-bright')
font = {'family' : 'monospace',
'weight' : 'bold',
'size' : 24}
mpl.rc('font', **font)
import seaborn as sns
sns.set_style('whitegrid')
#ignore pink warnings
import warnings
warnings.filterwarnings('ignore')
# Allow for large # columns
pd.set_option('display.max_columns', 0)
# pd.set_option('display.max_rows','')
# import plotly.express as px
# import plotly.graph_objects as go
# STATSMODELS
import statsmodels.api as sm
import statsmodels.stats.api as sms
import statsmodels.formula.api as smf
#import statsmodels.formula.api as ols
import statsmodels.stats.multicomp
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# SCIPY
import scipy.stats as stats
from scipy.stats import normaltest as normtest # D'Agostino and Pearson's omnibus test
from collections import Counter
# SKLEARN
from sklearn.metrics import mean_squared_error as mse
from sklearn.preprocessing import RobustScaler
# ADDITIONAL LIBRARIES
#import researchpy as rp
import uszipcode
from uszipcode import SearchEngine
# HOT_STATS() function: display statistical summaries of a feature column
def hot_stats(data, column, verbose=False, t=None):
"""
Scans the values of a column within a dataframe and displays its datatype,
nulls (incl. pct of total), unique values, non-null value counts, and
statistical info (if the datatype is numeric).
---------------------------------------------
Parameters:
**args:
data: accepts dataframe
column: accepts name of column within dataframe (should be inside quotes '')
**kwargs:
verbose: (optional) accepts a boolean (default=False); verbose=True will display all
unique values found.
t: (optional) accepts column name as target to calculate correlation coefficient against
using pandas data.corr() function.
-------------
Examples:
    hot_stats(df, 'str_column') --> where df = data, 'str_column' = column you want to scan
hot_stats(df, 'numeric_column', t='target') --> where 'target' = column to check correlation value
-----------------
Future:
#todo: get mode(s)
#todo: functionality for string objects
#todo: pass multiple columns at once and display all
-----------------
"""
# assigns variables to call later as shortcuts
feature = data[column]
rdash = "-------->"
ldash = "<--------"
# figure out which hot_stats to display based on dtype
if feature.dtype == 'float':
hot_stats = feature.describe().round(2)
elif feature.dtype == 'int':
hot_stats = feature.describe()
    elif str(feature.dtype) in ('object', 'category', 'datetime64[ns]'):
hot_stats = feature.agg(['min','median','max'])
t = None # ignores corr check for non-numeric dtypes by resetting t
else:
hot_stats = None
# display statistics (returns different info depending on datatype)
print(rdash)
print("HOT!STATS")
print(ldash)
    # display column name in uppercase
print(f"\n{feature.name.upper()}")
# display the data type
print(f"Data Type: {feature.dtype}\n")
# display the mode
print(hot_stats,"\n")
print(f"à-la-Mode: \n{feature.mode()}\n")
# find nulls and display total count and percentage
if feature.isna().sum() > 0:
print(f"Found\n{feature.isna().sum()} Nulls out of {len(feature)}({round(feature.isna().sum()/len(feature)*100,2)}%)\n")
else:
print("\nNo Nulls Found!\n")
# display value counts (non-nulls)
print(f"Non-Null Value Counts:\n{feature.value_counts()}\n")
# display count of unique values
print(f"# Unique Values: {len(feature.unique())}\n")
# displays all unique values found if verbose set to true
if verbose == True:
print(f"Unique Values:\n {feature.unique()}\n")
# display correlation coefficient with target for numeric columns:
if t != None:
corr = feature.corr(data[t]).round(4)
print(f"Correlation with {t.upper()}: {corr}")
# NULL_HUNTER() function: display Null counts per column/feature
def null_hunter(data):
print(f"Columns with Null Values")
print("------------------------")
for column in data:
if data[column].isna().sum() > 0:
print(f"{data[column].name}: \n{data[column].isna().sum()} out of {len(data[column])} ({round(data[column].isna().sum()/len(data[column])*100,2)}%)\n")
# CORR_DICT() function: calculates correlation coefficients assoc. with features and stores in a dictionary
def corr_dict(data, X, y):
corr_coefs = []
for x in X:
corr = data[x].corr(data[y])
corr_coefs.append(corr)
corr_dict = {}
for x, c in zip(X, corr_coefs):
corr_dict[x] = c
return corr_dict
# SUB_SCATTER() function: pass list of features (x_cols) and compare against target (or another feature)
def sub_scatter(data, x_cols, y, color=None, nrows=None, ncols=None):
"""
Desc: displays set of scatterplots for multiple columns or features of a dataframe.
pass in list of column names (x_cols) to plot against y-target (or another feature for
multicollinearity analysis)
args: data, x_cols, y
kwargs: color (default is magenta (#C839C5))
example:
x_cols = ['col1', 'col2', 'col3']
y = 'col4'
sub_scatter(df, x_cols, y)
example with color kwarg:
sub_scatter(df, x_cols, y, color=#)
alternatively you can pass the column list and target directly:
sub_scatter(df, ['col1', 'col2', 'col3'], 'price')
"""
if nrows == None:
nrows = 1
if ncols == None:
ncols = 3
if color == None:
color = '#C839C5'
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(16,4))
for x_col, ax in zip(x_cols, axes):
data.plot(kind='scatter', x=x_col, y=y, ax=ax, color=color)
ax.set_title(x_col.capitalize() + " vs. " + y.capitalize())
# SUB_HISTS() function: plot histogram subplots
def sub_hists(data):
plt.style.use('seaborn-bright')
for column in data.describe():
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(121)
ax.hist(data[column], density=True, label = column+' histogram', bins=20)
ax.set_title(column.capitalize())
ax.legend()
fig.tight_layout()
# --------- ZIP_STATS() --------- #
def zip_stats(zipcodes,
minimum=0, maximum=5000000,
simple=True):
"""
Lookup median home values for zipcodes or return zip codes of a min and max median home value
#TODO: add input options for city state county
#TODO: add input options for other keywords besides median home val
*Prerequisites: USZIPCODE() pypi package is a required dependency
**ARGS
zipcodes: dataframe or array of strings (zipcodes)
> Example1: zipcodes=df[zipcode']
> Example2: zipcodes=['01267','90025']
minimum: integer for dollar amount min threshold (default is 0)
maximum: integer for dollar amount max threshold (default is 5000000, i.e. no maximum)
**KWARGS
simple: default=True
> set simple_zipcode=False to use rich info database (will only apply once TODOs above are added)
"""
# pypi package for retrieving information based on us zipcodes
import uszipcode
from uszipcode import SearchEngine
# set simple_zipcode=False to use rich info database
if simple:
search = SearchEngine(simple_zipcode=True)
else:
search = SearchEngine(simple_zipcode=False)
# create empty dictionary
dzip = {}
# search pypi uszipcode library to retrieve data for each zipcode
for code in zipcodes:
z = search.by_zipcode(code)
dzip[code] = z.to_dict()
keyword='median_home_value'
# # pull just the median home values from dataset and append to list
# create empty lists for keys and vals
keys = []
zips = []
for index in dzip:
keys.append(dzip[index][keyword])
# put zipcodes in other list
for index in dzip:
zips.append(dzip[index]['zipcode'])
# zip both lists into dictionary
zipkey = dict(zip(zips, keys))
zipvals = {}
for k,v in zipkey.items():
if v > minimum and v < maximum:
zipvals[k]=v
return zipvals
"""
>>>>>>>>>>>>>>>>>> TIME SERIES <<<<<<<<<<<<<<<<<<<<<<
* makeTime()
* checkTime()
* mapTime()
"""
def makeTime(data, idx):
"""
Converts a column (`idx`) to datetime formatted index for a dataframe (`data`)
Returns copy of original dataframe
new_df = makeTime(df_original, 'DateTime')
"""
df = data.copy()
df[idx] = pd.to_datetime(df[idx], errors='coerce')
df['DateTime'] = df[idx].copy()
df.set_index(idx, inplace=True, drop=True)
return df
def melt_data(df): # from flatiron starter notebook
melted = pd.melt(df, id_vars=['RegionID','RegionName', 'City', 'State', 'Metro', 'CountyName',
'SizeRank'], var_name='Month', value_name='MeanValue')
melted['Month'] = pd.to_datetime(melted['Month'], format='%Y-%m')
melted = melted.dropna(subset=['MeanValue'])
return melted
def cityzip_dicts(df, col1, col2):
"""
Creates 3 dictionaries:
# dc1 : Dictionary of cities and zipcodes for quick referencing
# dc2: Dictionary of dataframes for each zipcode.
# city_zip: dictionary of zipcodes for each city
dc1 key: zipcodes
dc2 key: cities
city_zip key: city name
Returns dc1, dc2, city_zip
Ex:
NYC, nyc, city_zip = cityzip_dictionaries(df=NY, col1='RegionName', col2='City')
# dc1: returns dataframe for a given zipcode, or dict values of given column
NYC[10549] --> dataframe
NYC[10549]['MeanValue'] --> dict
# dc2: return dataframe for a given city, or just zipcodes for a given city:
nyc['New Rochelle'] --> dataframe
nyc['New Rochelle']['RegionName'].unique() --> dict of zip codes
# city_zip: returns dict of all zip codes in a city
city_zip['Yonkers']
"""
dc1 = {}
dc2 = {}
for zipcode in df[col1].unique():
dc1[zipcode] = df.groupby(col1).get_group(zipcode).resample('MS').asfreq()
for city in df[col2].unique():
dc2[city] = df.groupby(col2).get_group(city)
# create reference dict of city and zipcode matches
#zipcodes, cities in westchester
zips = df.RegionName.unique() #cities
cities = df.City.unique()
print("# ZIP CODES: ", len(zips))
print("# CITIES: ", len(cities))
city_zip = {}
for city in cities:
c = str(f'{city}')
city = df.loc[df['City'] == city]
zc = list(city['RegionName'].unique())
city_zip[c] = zc
return dc1, dc2, city_zip
def time_dict(d, xcol='RegionName', ycol='MeanValue'):
# zipcodes to plot
zipcodes = list(d.keys())
# create empty dictionary for plotting
txd = {}
for i,zc in enumerate(zipcodes):
# store each zipcode as ts
ts = d[zc][ycol].rename(zc)
txd[zc] = ts
return txd
def mapTime(d, xcol, ycol='MeanValue', X=None, vlines=None, MEAN=True):
"""
Draws a timeseries 'map' of zipcodes and their mean values.
fig,ax = mapTime(d=HUDSON, xcol='RegionName', ycol='MeanValue', MEAN=True, vlines=None)
**ARGS
d: takes a dictionary of dataframes OR a single dataframe
xcol: column in dataframe containing x-axis values (ex: zipcode)
ycol: column in dataframe containing y-axis values (ex: price)
X: list of x values to plot on x-axis (defaults to all x in d if empty)
**kw_args
mean: plots the mean of X (default=True)
vlines : default is None: shows MIN_, MAX_, crash
*Ex1: `d` = dataframe
mapTime(d=NY, xcol='RegionName', ycol='MeanValue', X=list_of_zips)
*Ex2: `d` = dictionary of dataframes
mapTime(d=NYC, xcol='RegionName', y='MeanValue')
"""
    import matplotlib as mpl
    font = {'family': 'monospace',
            'weight': 'bold',
            'size': 24}
    mpl.rc('font', **font)
# create figure for timeseries plot
fig, ax = plt.subplots(figsize=(21,13))
plt.title(label=f'Time Series Plot: {str(ycol)}')
    ax.set(title='Mean Home Values', xlabel='Year', ylabel='Price($)')
zipcodes = []
#check if `d` is dataframe or dictionary
if type(d) == pd.core.frame.DataFrame:
# if X is empty, create list of all zipcodes
        if X is None or len(X) == 0:
zipcodes = list(d[xcol].unique())
else:
zipcodes = X
# cut list in half
breakpoint = len(zipcodes)//2
        for i, zc in enumerate(zipcodes):
            if i < breakpoint:
                ls = '-'
            else:
                ls = '--'
            ts = d[ycol].loc[zc]
### PLOT each zipcode as timeseries `ts`
ts.plot(label=str(zc), ax=ax, ls=ls)
## Calculate and plot the MEAN
if MEAN:
mean = d[ycol].mean(axis=1)
mean.plot(label='Mean',lw=5,color='black')
elif type(d) == dict:
# if X passed in as empty list, create list of all zipcodes
        if X is None or len(X) == 0:
zipcodes = list(d.keys())
else:
zipcodes = X
# cut list in half
breakpoint = len(zipcodes)//2
# create empty dictionary for plotting
txd = {}
# create different linestyles for zipcodes (easier to distinguish if list is long)
for i,zc in enumerate(zipcodes):
if i < breakpoint:
ls='-'
else:
ls='--'
# store each zipcode as ts
ts = d[zc][ycol].rename(zc)
### PLOT each zipcode as timeseries `ts`
ts.plot(label=str(zc), ax=ax, ls=ls, lw=2)
txd[zc] = ts
if MEAN:
mean = pd.DataFrame(txd).mean(axis=1)
mean.plot(label='Mean',lw=5,color='black')
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", ncol=2)
if vlines:
## plot crash, min and max vlines
crash = '01-2009'
ax.axvline(crash, label='Housing Index Drops',color='red',ls=':',lw=2)
MIN_ = ts.loc[crash:].idxmin()
MAX_ = ts.loc['2004':'2010'].idxmax()
ax.axvline(MIN_, label=f'Min Price Post Crash {MIN_}', color='black',lw=2)
ax.axvline(MAX_,label='Max Price', color='black', ls=':',lw=2)
return fig, ax
# # Check Seasonality
def freeze_time(ts, mode='A'):
"""
Calculates and plots Seasonal Decomposition for a time series
ts : time-series
mode : 'A' for 'additive' or 'M' for 'multiplicative'
"""
from statsmodels.tsa.seasonal import seasonal_decompose
if mode == 'A': #default
decomp = seasonal_decompose(ts, model='additive')
elif mode == 'M':
decomp = seasonal_decompose(ts, model='multiplicative')
freeze = decomp.plot()
ts_seas = decomp.seasonal
plt.figure()
plt.tight_layout()
ax = ts_seas.plot(c='green')
fig = ax.get_figure()
fig.set_size_inches(12,5)
## Get min and max idx
min_ = ts_seas.idxmin()
max_ = ts_seas.idxmax()
min_2 = ts_seas.loc[max_:].idxmin()
ax.axvline(min_,label=min_,c='red')
ax.axvline(max_,c='red',ls=':', lw=2)
ax.axvline(min_2,c='red', lw=2)
period = min_2 - min_
ax.set_title(f'Season Length = {period}')
return freeze
#### clockTime() --- time-series snapshot statistical summary ###
#
# /\ /\ /\ /\
# / CLOCKTIME STATS /
# \/ \/ \/
#
"""
clockTime()
Dependencies include the following METHODS:
- check_time(data, time) >>> convert to datetimeindex
- test_time(TS, y) >>> dickey-fuller (stationarity) test
- roll_time() >>> rolling mean
- freeze_time() >>> seasonality check
- diff_time() >>> differencing
- autoplot() >>> autocorrelation and partial autocorrelation plots
"""
# class clockTime():
# def __init__(data, time, x1, x2, y, freq=None):
# self.data = data
# self.time = time
# self.x1 = x1
# self.x2 = x2
# self.y = y
# self.freq = freq
def clockTime(ts, lags, d, TS, y):
"""
/\ /\ /\ /\ ______________/\/\/\__-_-_
/ CLOCKTIME STATS / \/
\/ \/ \/
# clockTime(ts, lags=43, d=5, TS=NY, y='MeanValue',figsize=(13,11))
#
# ts = df.loc[df['RegionName']== zc]["MeanValue"].rename(zc).resample('MS').asfreq()
"""
# import required libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from numpy import log
import pandas as pd
from pandas import Series
from pandas.plotting import autocorrelation_plot
from pandas.plotting import lag_plot
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf
from statsmodels.graphics.tsaplots import plot_pacf
print(' /\\ '*3+' /')
print('/ CLOCKTIME STATS')
print(' \/'*3)
#**************#
# Plot Time Series
#original
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(21,13))
ts.plot(label='Original', ax=axes[0,0],c='red')
# autocorrelation
autocorrelation_plot(ts, ax=axes[0,1], c='magenta')
# 1-lag
autocorrelation_plot(ts.diff().dropna(), ax=axes[1,0], c='green')
lag_plot(ts, lag=1, ax=axes[1,1])
plt.tight_layout()
plt.gcf().autofmt_xdate()
plt.show();
# DICKEY-FULLER Stationarity Test
# TS = NY | y = 'MeanValue'
dtest = adfuller(TS[y].dropna())
    if dtest[1] < 0.05:
        ## null hypothesis (unit root) rejected: series is stationary,
        ## so skip differencing and check autoplot
        stationary = True
        r = 'rejected'
    else:
        ### failed to reject the null: difference data before checking autoplot
        stationary = False
        r = 'not rejected'
#**************#
# ts orders of difference
ts1 = ts.diff().dropna()
ts2 = ts.diff().diff().dropna()
ts3 = ts.diff().diff().diff().dropna()
ts4 = ts.diff().diff().diff().diff().dropna()
tdiff = [ts1,ts2,ts3,ts4]
# Calculate Standard Deviation of Differenced Data
sd = []
for td in tdiff:
sd.append(np.std(td))
#sd = [np.std(ts1), np.std(ts2),np.std(ts3),np.std(ts4)]
    SD = pd.DataFrame(data=sd, index=['ts1', 'ts2', 'ts3', 'ts4'], columns=['sd'])
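    # Hedged sketch of the remaining summary -- the original file is truncated here:
    # report the Dickey-Fuller outcome and differencing standard deviations, then show
    # ACF / PACF plots for the first-order differenced series.
    print(f"Dickey-Fuller p-value: {dtest[1]} | null {r} | stationary: {stationary}")
    print(SD)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(13, 5))
    plot_acf(ts.diff().dropna(), ax=axes[0], lags=lags)
    plot_pacf(ts.diff().dropna(), ax=axes[1], lags=lags)
    plt.tight_layout()
    plt.show()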
import pytest
import os
import sqlite3
import shutil
import numpy as np
import pandas as pd
from sqlalchemy import and_
from ticclat.ticclat_schema import Wordform, Lexicon, Anahash, \
WordformLinkSource, MorphologicalParadigm
from ticclat.utils import read_json_lines, read_ticcl_variants_file
from ticclat.dbutils import bulk_add_wordforms, add_lexicon, \
get_word_frequency_df, bulk_add_anahashes, \
connect_anahashes_to_wordforms, update_anahashes, get_wf_mapping, \
add_lexicon_with_links, write_wf_links_data, add_morphological_paradigms, \
empty_table, add_ticcl_variants
from . import data_dir
# Make sure np.int64 are inserted into the testing database as integers and
# not binary data.
# Source: https://stackoverflow.com/questions/38753737
sqlite3.register_adapter(np.int64, int)
def test_bulk_add_wordforms_all_new(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wordforms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wordforms) == len(wfs['wordform'])
assert [wf.wordform for wf in wordforms] == list(wfs['wordform'])
def test_bulk_add_wordforms_some_new(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wfs['wordform'] = ['wf3', 'wf4', 'wf5']
wfs['wordform_lowercase'] = ['wf3', 'wf4', 'wf4']
n = bulk_add_wordforms(dbsession, wfs)
assert n == 2
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 5
assert [w.wordform for w in wrdfrms] == ['wf1', 'wf2', 'wf3', 'wf4', 'wf5']
def test_bulk_add_wordforms_not_unique(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf1', 'wf2']
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 2
def test_bulk_add_wordforms_whitespace(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1 ', ' wf2', ' ', ' \t']
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 2
assert wrdfrms[0].wordform == 'wf1'
assert wrdfrms[1].wordform == 'wf2'
def test_bulk_add_wordforms_drop_empty_and_nan(dbsession):
wfs = pd.DataFrame()
wfs["wordform"] = ["wf1", "", "wf2", np.NaN]
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 2
assert wrdfrms[0].wordform == "wf1"
assert wrdfrms[1].wordform == "wf2"
def test_bulk_add_wordforms_replace_spaces(dbsession):
wfs = pd.DataFrame()
wfs["wordform"] = ["wf 1", "wf2"]
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 2
assert wrdfrms[0].wordform == "wf_1"
assert wrdfrms[1].wordform == "wf2"
def test_bulk_add_wordforms_replace_underscores(dbsession):
wfs = pd.DataFrame()
wfs["wordform"] = ["wf_1", "wf 2"]
print(dbsession)
bulk_add_wordforms(dbsession, wfs)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 2
assert wrdfrms[0].wordform == "wf*1"
assert wrdfrms[1].wordform == "wf_2"
def test_add_lexicon(dbsession):
name = 'test lexicon'
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
print(dbsession)
add_lexicon(dbsession, lexicon_name=name, vocabulary=True, wfs=wfs)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert len(wrdfrms) == 3
lexicons = dbsession.query(Lexicon).all()
assert len(lexicons) == 1
assert lexicons[0].lexicon_name == name
assert len(lexicons[0].lexicon_wordforms) == 3
wrdfrms = sorted([w.wordform for w in lexicons[0].lexicon_wordforms])
assert wrdfrms == list(wfs['wordform'])
def test_get_word_frequency_df(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
bulk_add_wordforms(dbsession, wfs)
freq_df = get_word_frequency_df(dbsession)
expected = pd.DataFrame({'wordform': ['wf1', 'wf2', 'wf3'],
'frequency': [1, 1, 1]}).set_index('wordform')
assert freq_df.equals(expected)
def test_get_word_frequency_df_empty(dbsession):
freq_df = get_word_frequency_df(dbsession)
assert freq_df is None
def test_bulk_add_anahashes(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
bulk_add_wordforms(dbsession, wfs)
a = pd.DataFrame({'wordform': ['wf1', 'wf2', 'wf3'],
'anahash': [1, 2, 3]}).set_index('wordform')
bulk_add_anahashes(dbsession, a)
ahs = dbsession.query(Anahash).order_by(Anahash.anahash_id).all()
print(ahs[0])
assert [a.anahash for a in ahs] == list(a['anahash'])
def test_connect_anahashes_to_wordforms(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
bulk_add_wordforms(dbsession, wfs)
wfs = get_word_frequency_df(dbsession, add_ids=True)
wf_mapping = wfs['wordform_id'].to_dict()
a = pd.DataFrame({'wordform': ['wf1', 'wf2', 'wf3'],
'anahash': [1, 2, 3]}).set_index('wordform')
bulk_add_anahashes(dbsession, a)
connect_anahashes_to_wordforms(dbsession, a, wf_mapping)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert [wf.anahash.anahash for wf in wrdfrms] == list(a['anahash'])
def test_connect_anahashes_to_wordforms_empty(dbsession):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
bulk_add_wordforms(dbsession, wfs)
a = pd.DataFrame({'wordform': ['wf1', 'wf2', 'wf3'],
'anahash': [1, 2, 3]}).set_index('wordform')
bulk_add_anahashes(dbsession, a)
connect_anahashes_to_wordforms(dbsession, a, a['anahash'].to_dict())
# nothing was updated the second time around (the values didn't change)
# (and there is no error when running this)
connect_anahashes_to_wordforms(dbsession, a, a['anahash'].to_dict())
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert [wf.anahash.anahash for wf in wrdfrms] == list(a['anahash'])
@pytest.mark.skipif(shutil.which('TICCL_anahash') is None, reason='Install TICCL before testing this.')
@pytest.mark.datafiles(os.path.join(data_dir(), 'alphabet'))
def test_update_anahashes(dbsession, datafiles):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf-a', 'wf-b', 'wf-c']
alphabet_file = datafiles.listdir()[0]
bulk_add_wordforms(dbsession, wfs)
anahashes = dbsession.query(Anahash).order_by(Anahash.anahash_id).all()
assert len(anahashes) == 0
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
for w in wrdfrms:
assert(w.anahash) is None
update_anahashes(dbsession, alphabet_file)
# If we don't commit here, the anahashes won't be updated when we do the
# tests.
dbsession.commit()
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
anahashes = dbsession.query(Anahash).order_by(Anahash.anahash_id).all()
# Three anahashes were added
assert len(anahashes) == 3
# The anahashes are connected to the correct wordforms
for wf, a in zip(wrdfrms, (3, 2, 1)):
assert wf.anahash_id == a
@pytest.mark.skipif(shutil.which('TICCL_anahash') is None, reason='Install TICCL before testing this.')
@pytest.mark.datafiles(os.path.join(data_dir(), 'alphabet'))
def test_update_anahashes_empty_wf(dbsession, datafiles):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf-a', 'wf-b', 'wf-c', ' ']
alphabet_file = datafiles.listdir()[0]
bulk_add_wordforms(dbsession, wfs)
# make sure ticcl doesn't choke on the empty wordform (it must not be added
# to the database)
update_anahashes(dbsession, alphabet_file)
@pytest.mark.datafiles(os.path.join(data_dir(), 'alphabet'))
def test_update_anahashes_nothing_to_update(dbsession, datafiles):
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
bulk_add_wordforms(dbsession, wfs)
a = pd.DataFrame({'wordform': ['wf1', 'wf2', 'wf3'],
'anahash': [1, 2, 3]}).set_index('wordform')
bulk_add_anahashes(dbsession, a)
connect_anahashes_to_wordforms(dbsession, a, a['anahash'].to_dict())
alphabet_file = datafiles.listdir()[0]
update_anahashes(dbsession, alphabet_file)
wrdfrms = dbsession.query(Wordform).order_by(Wordform.wordform_id).all()
assert [wf.anahash.anahash for wf in wrdfrms] == list(a['anahash'])
def test_get_wf_mapping_lexicon(dbsession):
name = 'test lexicon'
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
print(dbsession)
lex = add_lexicon(dbsession, lexicon_name=name, vocabulary=True, wfs=wfs)
wf_mapping = get_wf_mapping(dbsession, lexicon=lex)
for w in wfs['wordform']:
assert w in wf_mapping.keys()
def test_get_wf_mapping_lexicon_no_id(dbsession):
name = 'test lexicon'
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
print(dbsession)
lex = Lexicon(lexicon_name=name)
with pytest.raises(ValueError):
get_wf_mapping(dbsession, lexicon=lex)
def test_get_wf_mapping_lexicon_id(dbsession):
name = 'test lexicon'
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3']
print(dbsession)
lex = add_lexicon(dbsession, lexicon_name=name, vocabulary=True, wfs=wfs)
lexicon_id = lex.lexicon_id
wf_mapping = get_wf_mapping(dbsession, lexicon_id=lexicon_id)
for w in wfs['wordform']:
assert w in wf_mapping.keys()
def test_write_wf_links_data(dbsession, fs):
wfl_file = 'wflinks'
wfls_file = 'wflsources'
name = 'linked test lexicon'
wfs = pd.DataFrame()
wfs['wordform'] = ['wf1', 'wf2', 'wf3', 'wf1s', 'wf2s', 'wf3s']
lex = add_lexicon(dbsession, lexicon_name=name, vocabulary=True, wfs=wfs)
wfs = pd.DataFrame()
wfs['lemma'] = ['wf1', 'wf2', 'wf3']
wfs['variant'] = ['wf1s', 'wf2s', 'wf3s']
wfm = get_wf_mapping(dbsession, lexicon=lex)
links_file = open(wfl_file, 'w')
sources_file = open(wfls_file, 'w')
num_l, num_s = write_wf_links_data(
dbsession,
wf_mapping=wfm,
links_df=wfs,
wf_from_name='lemma',
wf_to_name='variant',
lexicon_id=lex.lexicon_id,
wf_from_correct=True,
wf_to_correct=True,
links_file=links_file,
sources_file=sources_file,
)
links_file.close()
sources_file.close()
links_file = open(wfl_file, 'r')
sources_file = open(wfls_file, 'r')
assert num_l == 3 * 2
assert num_s == 3 * 2
wflinks = []
for wf1, wf2 in zip(wfs['lemma'], wfs['variant']):
wflinks.append({"wordform_from": wfm[wf1], "wordform_to": wfm[wf2]})
wflinks.append({"wordform_from": wfm[wf2], "wordform_to": wfm[wf1]})
wflsources = []
for wfl in wflinks:
wflsources.append({"wordform_from": wfl['wordform_from'],
"wordform_to": wfl['wordform_to'],
"lexicon_id": lex.lexicon_id,
"wordform_from_correct": True,
"wordform_to_correct": True})
for wfls1, wfls2 in zip(read_json_lines(sources_file), wflsources):
assert wfls1 == wfls2
links_file.close()
sources_file.close()
def test_add_lexicon_with_links(dbsession):
name = 'linked test lexicon'
    wfs = pd.DataFrame()
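    # Hedged sketch of the rest of the test -- the original file is truncated here.
    # The keyword arguments of add_lexicon_with_links below are assumptions modelled on
    # add_lexicon() and write_wf_links_data() used elsewhere in this module.
    wfs['lemma'] = ['wf1', 'wf2', 'wf3']
    wfs['variant'] = ['wf1s', 'wf2s', 'wf3s']
    add_lexicon_with_links(dbsession, lexicon_name=name, vocabulary=True, wfs=wfs,
                           from_column='lemma', to_column='variant',
                           from_correct=True, to_correct=True)
    assert dbsession.query(WordformLinkSource).count() == 3 * 2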
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from minder_utils.util import formatting_plots
from minder_utils.configurations import visual_config
sns.set()
class Visual_Sleep:
def __init__(self, path, style='age', filename='imperial_dementia_20211026'):
'''
Visualise the sleep data
Parameters
----------
path: str, path to the sleep data
style: str, plot style
- age: lineplot, hue = age
- joint: lineplot, hue = age, style = Sleep Time
- face: facegrid
- re: relation plot
'''
self.config = visual_config['sleep']
self.style = style
if 'imperial' in filename:
self.data = pd.read_csv(os.path.join(path, filename + '.csv'), delimiter=';')
else:
self.data = pd.read_csv(os.path.join(path, filename + '.csv'))
# Divide the data by time
self.data.start_date = pd.to_datetime(self.data.start_date)
self.data['Sleep Time'] = 'Late'
        index = pd.DatetimeIndex(self.data.start_date)
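        # Hedged sketch of the remaining assignment -- the original file is truncated here.
        # Assumption: sessions starting in the evening hours are labelled 'Early'; the
        # 18:00-23:00 window is illustrative only.
        self.data.loc[(index.hour >= 18) & (index.hour <= 23), 'Sleep Time'] = 'Early'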
import os
import sys
import pytest
from shapely.geometry import Polygon, GeometryCollection
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from tests.fixtures import *
from tests.test_core_components_route import self_looping_route, route
from tests.test_core_components_service import service
from genet.inputs_handler import matsim_reader, gtfs_reader
from genet.inputs_handler import read
from genet.schedule_elements import Schedule, Service, Route, Stop, read_vehicle_types
from genet.utils import plot, spatial
from genet.validate import schedule_validation
from genet.exceptions import ServiceIndexError, RouteIndexError, StopIndexError, UndefinedCoordinateSystemError, \
ConflictingStopData, InconsistentVehicleModeError
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
pt2matsim_vehicles_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "vehicles.xml"))
@pytest.fixture()
def schedule():
route_1 = Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='2',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
@pytest.fixture()
def strongly_connected_schedule():
route_1 = Route(route_short_name='name',
mode='bus',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='3', x=3, y=3, epsg='epsg:27700', name='Stop_3'),
Stop(id='4', x=7, y=5, epsg='epsg:27700', name='Stop_4'),
Stop(id='1', x=4, y=2, epsg='epsg:27700', name='Stop_1')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['1', '2'], departure_offsets=['1', '2'],
id='1')
route_2 = Route(route_short_name='name_2',
mode='bus',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5'),
Stop(id='2', x=1, y=2, epsg='epsg:27700', name='Stop_2'),
Stop(id='7', x=3, y=3, epsg='epsg:27700', name='Stop_7'),
Stop(id='8', x=7, y=5, epsg='epsg:27700', name='Stop_8'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='Stop_5')],
trips={'trip_id': ['1', '2'], 'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
arrival_offsets=['1', '2', '3', '4', '5'],
departure_offsets=['1', '2', '3', '4', '5'],
id='2')
service = Service(id='service', routes=[route_1, route_2])
return Schedule(epsg='epsg:27700', services=[service])
def test_initiating_schedule(schedule):
s = schedule
assert_semantically_equal(dict(s._graph.nodes(data=True)), {
'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'6': {'services': {'service'}, 'routes': {'2'}, 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766856648946295, 'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76683608549253, 'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()}})
assert_semantically_equal(s._graph.edges(data=True)._adjdict,
{'5': {'6': {'services': {'service'}, 'routes': {'2'}}},
'6': {'7': {'services': {'service'}, 'routes': {'2'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}}, '8': {}, '4': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}})
log = s._graph.graph.pop('change_log')
assert log.empty
assert_semantically_equal(s._graph.graph,
{'name': 'Schedule graph',
'routes': {'2': {'route_short_name': 'name_2', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_3_bus', 'veh_4_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '2', 'route': [],
'await_departure': [],
'ordered_stops': ['5', '6', '7', '8']},
'1': {'route_short_name': 'name', 'mode': 'bus',
'trips': {'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
'arrival_offsets': ['00:00:00', '00:03:00',
'00:07:00', '00:13:00'],
'departure_offsets': ['00:00:00', '00:05:00',
'00:09:00', '00:15:00'],
'route_long_name': '', 'id': '1', 'route': [],
'await_departure': [],
'ordered_stops': ['1', '2', '3', '4']}},
'services': {'service': {'id': 'service', 'name': 'name'}},
'route_to_service_map': {'1': 'service', '2': 'service'},
'service_to_route_map': {'service': ['1', '2']},
'crs': {'init': 'epsg:27700'}})
def test_initiating_schedule_with_non_uniquely_indexed_objects():
route_1 = Route(route_short_name='name',
mode='bus', id='',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'), Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'), Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['13:00:00', '13:30:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
route_2 = Route(route_short_name='name_2',
mode='bus', id='',
stops=[Stop(id='5', x=4, y=2, epsg='epsg:27700'), Stop(id='6', x=1, y=2, epsg='epsg:27700'),
Stop(id='7', x=3, y=3, epsg='epsg:27700'), Stop(id='8', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['11:00:00', '13:00:00'],
'vehicle_id': ['veh_2_bus', 'veh_3_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
service1 = Service(id='service', routes=[route_1, route_2])
service2 = Service(id='service', routes=[route_1, route_2])
s = Schedule(epsg='epsg:27700', services=[service1, service2])
assert s.number_of_routes() == 4
assert len(s) == 2
def test__getitem__returns_a_service(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert schedule['service'] == services[0]
def test_accessing_route(schedule):
assert schedule.route('1') == Route(route_short_name='name',
mode='bus', id='1',
stops=[Stop(id='1', x=4, y=2, epsg='epsg:27700'),
Stop(id='2', x=1, y=2, epsg='epsg:27700'),
Stop(id='3', x=3, y=3, epsg='epsg:27700'),
Stop(id='4', x=7, y=5, epsg='epsg:27700')],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['1', '2'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'])
def test__repr__shows_number_of_services(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
schedule = Schedule('epsg:27700')
s = schedule.__repr__()
assert 'instance at' in s
assert 'services' in s
Schedule.__len__.assert_called()
def test__str__shows_info():
schedule = Schedule('epsg:27700')
assert 'Number of services' in schedule.__str__()
assert 'Number of routes' in schedule.__str__()
def test__len__returns_the_number_of_services(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert len(schedule) == 1
def test_print_shows_info(mocker):
mocker.patch.object(Schedule, 'info')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.info.assert_called_once()
def test_info_shows_number_of_services_and_routes(mocker):
mocker.patch.object(Schedule, '__len__', return_value=0)
mocker.patch.object(Schedule, 'number_of_routes')
schedule = Schedule('epsg:27700')
schedule.print()
Schedule.__len__.assert_called()
Schedule.number_of_routes.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker, schedule):
mocker.patch.object(plot, 'plot_graph')
schedule.plot()
plot.plot_graph.assert_called_once()
def test_reproject_changes_projection_for_all_stops_in_route():
correct_x_y = {'x': -0.14967658860132668, 'y': 51.52393050617373}
schedule = Schedule(
'epsg:27700',
[Service(id='10314', routes=[
Route(
route_short_name='12',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_1_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])])
schedule.reproject('epsg:4326')
_stops = list(schedule.stops())
stops = dict(zip([stop.id for stop in _stops], _stops))
assert_semantically_equal({'x': stops['26997928P'].x, 'y': stops['26997928P'].y}, correct_x_y)
assert_semantically_equal({'x': stops['26997928P.link:1'].x, 'y': stops['26997928P.link:1'].y}, correct_x_y)
def test_adding_merges_separable_schedules(route):
schedule = Schedule(epsg='epsg:4326', services=[Service(id='1', routes=[route])])
before_graph_nodes = schedule.reference_nodes()
before_graph_edges = schedule.reference_edges()
a = Stop(id='10', x=40, y=20, epsg='epsg:27700', linkRefId='1')
b = Stop(id='20', x=10, y=20, epsg='epsg:27700', linkRefId='2')
c = Stop(id='30', x=30, y=30, epsg='epsg:27700', linkRefId='3')
d = Stop(id='40', x=70, y=50, epsg='epsg:27700', linkRefId='4')
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[Service(id='2', routes=[
Route(
route_short_name='name',
mode='bus',
stops=[a, b, c, d],
trips={'trip_id': ['1', '2'],
'trip_departure_time': ['04:40:00', '05:40:00'],
'vehicle_id': ['veh_1_bus', 'veh_2_bus']},
arrival_offsets=['00:00:00', '00:03:00', '00:07:00', '00:13:00'],
departure_offsets=['00:00:00', '00:05:00', '00:09:00', '00:15:00'],
route=['1', '2', '3', '4'], id='2')
])])
tba_graph_nodes = schedule_to_be_added.reference_nodes()
tba_graph_edges = schedule_to_be_added.reference_edges()
schedule.add(schedule_to_be_added)
assert '1' in list(schedule.service_ids())
assert '2' in list(schedule.service_ids())
assert '1' in list(schedule.route_ids())
assert '2' in list(schedule.route_ids())
assert schedule.epsg == 'epsg:4326'
assert schedule.epsg == schedule_to_be_added.epsg
assert set(schedule._graph.nodes()) == set(before_graph_nodes) | set(tba_graph_nodes)
assert set(schedule._graph.edges()) == set(before_graph_edges) | set(tba_graph_edges)
def test_adding_throws_error_when_schedules_not_separable(test_service):
schedule = Schedule(epsg='epsg:4326', services=[test_service])
assert 'service' in schedule
schedule_to_be_added = Schedule(epsg='epsg:4326', services=[test_service])
with pytest.raises(NotImplementedError) as e:
schedule.add(schedule_to_be_added)
assert 'This method only supports adding non overlapping services' in str(e.value)
def test_adding_calls_on_reproject_when_schedules_dont_have_matching_epsg(test_service, different_test_service, mocker):
mocker.patch.object(Schedule, 'reproject')
schedule = Schedule(services=[test_service], epsg='epsg:27700')
assert schedule.has_service('service')
schedule_to_be_added = Schedule(services=[different_test_service], epsg='epsg:4326')
schedule.add(schedule_to_be_added)
schedule_to_be_added.reproject.assert_called_once_with('epsg:27700')
def test_service_ids_returns_keys_of_the_services_dict(test_service):
services = [test_service]
schedule = Schedule(services=services, epsg='epsg:4326')
assert set(schedule.service_ids()) == {'service'}
def test_routes_returns_service_ids_with_unique_routes(route, similar_non_exact_test_route):
services = [Service(id='1', routes=[route]), Service(id='2', routes=[similar_non_exact_test_route])]
schedule = Schedule(services=services, epsg='epsg:4326')
routes = list(schedule.routes())
assert route in routes
assert similar_non_exact_test_route in routes
assert len(routes) == 2
def test_number_of_routes_counts_routes(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4362')
assert schedule.number_of_routes() == 3
def test_service_attribute_data_under_key(schedule):
df = schedule.service_attribute_data(keys='name').sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}}
))
def test_service_attribute_data_under_keys(schedule):
df = schedule.service_attribute_data(keys=['name', 'id']).sort_index()
assert_frame_equal(df, DataFrame(
{'name': {'service': 'name'}, 'id': {'service': 'service'}}
))
def test_route_attribute_data_under_key(schedule):
df = schedule.route_attribute_data(keys='route_short_name').sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}}
))
def test_route_attribute_data_under_keys(schedule):
df = schedule.route_attribute_data(keys=['route_short_name', 'mode']).sort_index()
assert_frame_equal(df, DataFrame(
{'route_short_name': {'1': 'name', '2': 'name_2'}, 'mode': {'1': 'bus', '2': 'bus'}}
))
def test_stop_attribute_data_under_key(schedule):
df = schedule.stop_attribute_data(keys='x').sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0}}))
def test_stop_attribute_data_under_keys(schedule):
df = schedule.stop_attribute_data(keys=['x', 'y']).sort_index()
assert_frame_equal(df, DataFrame(
{'x': {'1': 4.0, '2': 1.0, '3': 3.0, '4': 7.0, '5': 4.0, '6': 1.0, '7': 3.0, '8': 7.0},
'y': {'1': 2.0, '2': 2.0, '3': 3.0, '4': 5.0, '5': 2.0, '6': 2.0, '7': 3.0, '8': 5.0}}))
def test_extracting_services_on_condition(schedule):
ids = schedule.extract_service_ids_on_attributes(conditions={'name': 'name'})
assert ids == ['service']
def test_extracting_routes_on_condition(schedule):
ids = schedule.extract_route_ids_on_attributes(conditions=[{'mode': 'bus'}, {'route_short_name': 'name_2'}],
how=all)
assert ids == ['2']
def test_extracting_stops_on_condition(schedule):
ids = schedule.extract_stop_ids_on_attributes(conditions=[{'x': (0, 4)}, {'y': (0, 2)}], how=all)
assert set(ids) == {'5', '6', '1', '2'}
def test_getting_services_on_modal_condition(schedule):
service_ids = schedule.services_on_modal_condition(modes='bus')
assert service_ids == ['service']
def test_getting_routes_on_modal_condition(schedule):
route_ids = schedule.routes_on_modal_condition(modes='bus')
assert set(route_ids) == {'1', '2'}
def test_getting_stops_on_modal_condition(schedule):
stop_ids = schedule.stops_on_modal_condition(modes='bus')
assert set(stop_ids) == {'5', '6', '7', '8', '3', '1', '4', '2'}
test_geojson = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "test_geojson.geojson"))
def test_getting_stops_on_spatial_condition_with_geojson(schedule, mocker):
mocker.patch.object(spatial, 'read_geojson_to_shapely',
return_value=GeometryCollection(
[Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])]))
stops = schedule.stops_on_spatial_condition(test_geojson)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_shapely_polygon(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
stops = schedule.stops_on_spatial_condition(p)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_stops_on_spatial_condition_with_s2_hex_region(schedule):
s2_region = '4837,4839,483f5,4844,4849'
stops = schedule.stops_on_spatial_condition(s2_region)
assert set(stops) == {'5', '6', '7', '8', '2', '4', '3', '1'}
def test_getting_routes_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p)
assert set(routes) == {'1', '2'}
def test_getting_routes_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.routes_on_spatial_condition(p, how='within')
assert set(routes) == {'1', '2'}
def test_getting_services_intersecting_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p)
assert set(routes) == {'service'}
def test_getting_services_contained_spatial_region(schedule):
p = Polygon([(-7.6, 49.7), (-7.4, 49.7), (-7.4, 49.8), (-7.6, 49.8), (-7.6, 49.7)])
routes = schedule.services_on_spatial_condition(p, how='within')
assert set(routes) == {'service'}
def test_applying_attributes_to_service(schedule):
assert schedule._graph.graph['services']['service']['name'] == 'name'
assert schedule['service'].name == 'name'
schedule.apply_attributes_to_services({'service': {'name': 'new_name'}})
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_attributes_changing_id_to_service_throws_error(schedule):
assert 'service' in schedule._graph.graph['services']
assert schedule._graph.graph['services']['service']['id'] == 'service'
assert schedule['service'].id == 'service'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_services({'service': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_route(schedule):
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'name'
assert schedule.route('1').route_short_name == 'name'
schedule.apply_attributes_to_routes({'1': {'route_short_name': 'new_name'}})
assert schedule._graph.graph['routes']['1']['route_short_name'] == 'new_name'
assert schedule.route('1').route_short_name == 'new_name'
def test_applying_mode_attributes_to_route_results_in_correct_mode_methods(schedule):
assert schedule.route('1').mode == 'bus'
assert schedule.modes() == {'bus'}
assert schedule.mode_graph_map() == {
'bus': {('3', '4'), ('2', '3'), ('1', '2'), ('6', '7'), ('5', '6'), ('7', '8')}}
schedule.apply_attributes_to_routes({'1': {'mode': 'new_bus'}})
assert schedule.route('1').mode == 'new_bus'
assert schedule.modes() == {'bus', 'new_bus'}
assert schedule['service'].modes() == {'bus', 'new_bus'}
assert schedule.mode_graph_map() == {'bus': {('7', '8'), ('6', '7'), ('5', '6')},
'new_bus': {('3', '4'), ('1', '2'), ('2', '3')}}
assert schedule['service'].mode_graph_map() == {'bus': {('6', '7'), ('7', '8'), ('5', '6')},
'new_bus': {('3', '4'), ('2', '3'), ('1', '2')}}
def test_applying_attributes_changing_id_to_route_throws_error(schedule):
assert '1' in schedule._graph.graph['routes']
assert schedule._graph.graph['routes']['1']['id'] == '1'
assert schedule.route('1').id == '1'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'1': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def test_applying_attributes_to_stop(schedule):
assert schedule._graph.nodes['5']['name'] == ''
assert schedule.stop('5').name == ''
schedule.apply_attributes_to_stops({'5': {'name': 'new_name'}})
assert schedule._graph.nodes['5']['name'] == 'new_name'
assert schedule.stop('5').name == 'new_name'
def test_applying_attributes_changing_id_to_stop_throws_error(schedule):
assert '5' in schedule._graph.nodes
assert schedule._graph.nodes['5']['id'] == '5'
assert schedule.stop('5').id == '5'
with pytest.raises(NotImplementedError) as e:
schedule.apply_attributes_to_routes({'5': {'id': 'new_id'}})
assert 'Changing id can only be done via the `reindex` method' in str(e.value)
def change_name(attrib):
return 'new_name'
def test_applying_function_to_services(schedule):
schedule.apply_function_to_services(function=change_name, location='name')
assert schedule._graph.graph['services']['service']['name'] == 'new_name'
assert schedule['service'].name == 'new_name'
def test_applying_function_to_routes(schedule):
schedule.apply_function_to_routes(function=change_name, location='route_short_name')
for route in schedule.routes():
assert schedule._graph.graph['routes'][route.id]['route_short_name'] == 'new_name'
assert route.route_short_name == 'new_name'
def test_applying_function_to_stops(schedule):
schedule.apply_function_to_stops(function=change_name, location='name')
for stop in schedule.stops():
assert stop.name == 'new_name'
assert schedule._graph.nodes[stop.id]['name'] == 'new_name'
def test_adding_service(schedule, service):
service.reindex('different_service')
service.route('1').reindex('different_service_1')
service.route('2').reindex('different_service_2')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_route_ids(schedule, service):
service.reindex('different_service')
schedule.add_service(service)
assert set(schedule.route_ids()) == {'1', '2', 'different_service_1', 'different_service_2'}
assert set(schedule.service_ids()) == {'service', 'different_service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service',
'different_service_1': 'different_service', 'different_service_2': 'different_service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2'],
'different_service': ['different_service_1', 'different_service_2']})
def test_adding_service_with_clashing_id_throws_error(schedule, service):
with pytest.raises(ServiceIndexError) as e:
schedule.add_service(service)
assert 'already exists' in str(e.value)
def test_adding_service_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service', 'some_id'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service', 'some_id'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
s = Service(id='some_id', routes=[r])
schedule.add_service(s, force=True)
assert_semantically_equal(dict(s.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(s.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'some_id', 'service'}})
assert_semantically_equal(s.graph()['2']['5'], {'routes': {'3'}, 'services': {'some_id'}})
def test_adding_service_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_service(Service(id='some_id', routes=[r]))
assert 'The following stops would inherit data' in str(e.value)
def test_removing_service(schedule):
schedule.remove_service('service')
assert not set(schedule.route_ids())
assert not set(schedule.service_ids())
assert not schedule._graph.graph['route_to_service_map']
assert not schedule._graph.graph['service_to_route_map']
def test_adding_route(schedule, route):
route.reindex('new_id')
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'new_id'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'new_id': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'new_id']})
def test_adding_route_with_clashing_id(schedule, route):
schedule.add_route('service', route)
assert set(schedule.route_ids()) == {'1', '2', 'service_3'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service', '2': 'service', 'service_3': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1', '2', 'service_3']})
def test_adding_route_to_non_existing_service_throws_error(schedule, route):
with pytest.raises(ServiceIndexError) as e:
schedule.add_route('service_that_doesnt_exist', route)
assert 'does not exist' in str(e.value)
def test_creating_a_route_to_add_using_id_references_to_existing_stops_inherits_schedule_stops_data(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['1', '2', '5']
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}}, '2': {'routes': {'3'}}, '5': {'routes': {'3'}}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_creating_a_route_to_add_giving_existing_schedule_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[schedule.stop('1'), schedule.stop('2'), schedule.stop('5')]
)
assert r.ordered_stops == ['1', '2', '5']
assert_semantically_equal(dict(r._graph.nodes(data=True)),
{'1': {'routes': {'3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'routes': {'3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'5': {'routes': {'3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700', 'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()}})
assert_semantically_equal(r._graph.edges(data=True)._adjdict,
{'1': {'2': {'routes': {'3'}}}, '2': {'5': {'routes': {'3'}}}, '5': {}})
schedule.add_route('service', r)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_does_not_overwrite_existing_stops(schedule):
expected_stops_data = {
'5': {'services': {'service'}, 'routes': {'2', '3'}, 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '1', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.76682779861249, 'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1', '3'}, 'id': '2', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '',
'lat': 49.766825803756994, 'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()}}
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
assert r.ordered_stops == ['1', '2', '5']
schedule.add_route('service', r, force=True)
assert_semantically_equal(dict(r.graph().nodes(data=True)), expected_stops_data)
assert_semantically_equal(r.graph()['1']['2'], {'routes': {'1', '3'}, 'services': {'service'}})
assert_semantically_equal(r.graph()['2']['5'], {'routes': {'3'}, 'services': {'service'}})
def test_adding_route_with_clashing_stops_data_only_flags_those_that_are_actually_different(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=4, y=2, epsg='epsg:27700', name='')]
)
assert r.ordered_stops == ['1', '2', '5']
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert "The following stops would inherit data currently stored under those Stop IDs in the Schedule: " \
"['1', '2']" in str(e.value)
def test_adding_route_with_clashing_stops_data_without_force_flag_throws_error(schedule):
r = Route(
id='3',
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=[Stop(id='1', x=1, y=2, epsg='epsg:27700'),
Stop(id='2', x=0, y=1, epsg='epsg:27700'),
Stop(id='5', x=0, y=2, epsg='epsg:27700')]
)
with pytest.raises(ConflictingStopData) as e:
schedule.add_route('service', r)
assert 'The following stops would inherit data' in str(e.value)
def test_extracting_epsg_from_an_intermediate_route_gives_none():
    # 'intermediate' meaning the route does not belong to a schedule yet but refers to stops in a schedule
r = Route(
route_short_name='name',
mode='bus',
trips={},
arrival_offsets=[],
departure_offsets=[],
stops=['S1', 'S2', 'S3']
)
assert r.epsg is None
def test_removing_route(schedule):
schedule.remove_route('2')
assert set(schedule.route_ids()) == {'1'}
assert set(schedule.service_ids()) == {'service'}
assert_semantically_equal(schedule._graph.graph['route_to_service_map'],
{'1': 'service'})
assert_semantically_equal(schedule._graph.graph['service_to_route_map'],
{'service': ['1']})
def test_removing_route_updates_services_on_nodes_and_edges(schedule):
schedule.remove_route('2')
assert_semantically_equal(dict(schedule.graph().nodes(data=True)),
{'5': {'services': set(), 'routes': set(), 'id': '5', 'x': 4.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set()},
'6': {'services': set(), 'routes': set(), 'id': '6', 'x': 1.0, 'y': 2.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set()},
'7': {'services': set(), 'routes': set(), 'id': '7', 'x': 3.0, 'y': 3.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set()},
'8': {'services': set(), 'routes': set(), 'id': '8', 'x': 7.0, 'y': 5.0, 'epsg': 'epsg:27700',
'name': '', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set()},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76683608549253,
'lon': -7.557121424907424, 's2_id': 5205973754090203369,
'additional_attributes': set()},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.76682779861249,
'lon': -7.557106577683727, 's2_id': 5205973754090531959,
'additional_attributes': set()},
'2': {'services': {'service'}, 'routes': {'1'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766825803756994,
'lon': -7.557148039524952, 's2_id': 5205973754090365183,
'additional_attributes': set()},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'name': '', 'lat': 49.766856648946295,
'lon': -7.5570681956375, 's2_id': 5205973754097123809,
'additional_attributes': set()}})
assert_semantically_equal(schedule.graph().edges(data=True)._adjdict,
{'5': {'6': {'services': set(), 'routes': set()}},
'6': {'7': {'services': set(), 'routes': set()}},
'7': {'8': {'services': set(), 'routes': set()}}, '8': {},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}},
'2': {'3': {'services': {'service'}, 'routes': {'1'}}}, '4': {}})
def test_removing_stop(schedule):
schedule.remove_stop('5')
assert {stop.id for stop in schedule.stops()} == {'1', '3', '4', '7', '8', '6', '2'}
def test_removing_unused_stops(schedule):
schedule.remove_route('1')
schedule.remove_unsused_stops()
assert {stop.id for stop in schedule.stops()} == {'6', '8', '5', '7'}
def test_iter_stops_returns_stops_objects(test_service, different_test_service):
schedule = Schedule(services=[test_service, different_test_service], epsg='epsg:4326')
assert set([stop.id for stop in schedule.stops()]) == {'0', '1', '2', '3', '4'}
assert all([isinstance(stop, Stop) for stop in schedule.stops()])
def test_read_matsim_schedule_returns_expected_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
epsg='epsg:27700')
correct_services = Service(id='10314', routes=[
Route(
route_short_name='12', id='VJbd8660f05fe6f744e58a66ae12bd66acbca88b98',
mode='bus',
stops=[Stop(id='26997928P', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700'),
Stop(id='26997928P.link:1', x='528464.1342843144', y='182179.7435136598', epsg='epsg:27700')],
route=['1'],
trips={'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'],
'vehicle_id': ['veh_0_bus']},
arrival_offsets=['00:00:00', '00:02:00'],
departure_offsets=['00:00:00', '00:02:00']
)
])
for val in schedule.services():
assert val == correct_services
assert_semantically_equal(schedule.stop_to_service_ids_map(),
{'26997928P.link:1': {'10314'}, '26997928P': {'10314'}})
assert_semantically_equal(schedule.stop_to_route_ids_map(),
{'26997928P': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'26997928P.link:1': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'}})
assert_semantically_equal(schedule.route('VJbd8660f05fe6f744e58a66ae12bd66acbca88b98').trips,
{'trip_id': ['VJ00938baa194cee94700312812d208fe79f3297ee_04:40:00'],
'trip_departure_time': ['04:40:00'], 'vehicle_id': ['veh_0_bus']})
assert_semantically_equal(
dict(schedule.graph().nodes(data=True)),
{'26997928P': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P', 'x': 528464.1342843144, 'y': 182179.7435136598, 'epsg': 'epsg:27700',
                      'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373, 'lon': -0.14967658860132668,
's2_id': 5221390302759871369, 'additional_attributes': {'name', 'isBlocking'},
'isBlocking': 'false'},
'26997928P.link:1': {'services': {'10314'}, 'routes': {'VJbd8660f05fe6f744e58a66ae12bd66acbca88b98'},
'id': '26997928P.link:1', 'x': 528464.1342843144, 'y': 182179.7435136598,
'epsg': 'epsg:27700', 'name': 'Brunswick Place (Stop P)', 'lat': 51.52393050617373,
'lon': -0.14967658860132668, 's2_id': 5221390302759871369,
'additional_attributes': {'name', 'linkRefId', 'isBlocking'}, 'linkRefId': '1',
'isBlocking': 'false'}}
)
def test_reading_vehicles_with_a_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_reading_vehicles_after_reading_schedule():
schedule = read.read_matsim_schedule(
path_to_schedule=pt2matsim_schedule_file,
path_to_vehicles=pt2matsim_vehicles_file,
epsg='epsg:27700')
assert_semantically_equal(schedule.vehicles, {'veh_0_bus': {'type': 'bus'}})
assert_semantically_equal(schedule.vehicle_types['bus'], {
'capacity': {'seats': {'persons': '71'}, 'standingRoom': {'persons': '1'}},
'length': {'meter': '18.0'},
'width': {'meter': '2.5'},
'accessTime': {'secondsPerPerson': '0.5'},
'egressTime': {'secondsPerPerson': '0.5'},
'doorOperation': {'mode': 'serial'},
'passengerCarEquivalents': {'pce': '2.8'}})
def test_is_strongly_connected_with_strongly_connected_schedule(strongly_connected_schedule):
assert strongly_connected_schedule.is_strongly_connected()
def test_is_strongly_connected_with_not_strongly_connected_schedule(schedule):
assert not schedule.is_strongly_connected()
def test_has_self_loops_with_self_has_self_looping_schedule(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
assert s.has_self_loops()
def test_has_self_loops_returns_self_looping_stops(self_looping_route):
s = Schedule('epsg:27700', [Service(id='service', routes=[self_looping_route])])
loop_nodes = s.has_self_loops()
assert loop_nodes == ['1']
def test_has_self_loops_with_non_looping_routes(schedule):
assert not schedule.has_self_loops()
def test_validity_of_services(self_looping_route, route):
s = Schedule('epsg:27700', [Service(id='1', routes=[self_looping_route]),
Service(id='2', routes=[route])])
assert not s['1'].is_valid_service()
assert s['2'].is_valid_service()
assert set(s.validity_of_services()) == {False, True}
def test_has_valid_services(schedule):
assert not schedule.has_valid_services()
def test_has_valid_services_with_only_valid_services(service):
s = Schedule('epsg:27700', [service])
assert s.has_valid_services()
def test_invalid_services_shows_invalid_services(service):
for route_id in service.route_ids():
service._graph.graph['routes'][route_id]['route'] = ['1']
s = Schedule('epsg:27700', [service])
assert s.invalid_services() == [service]
def test_is_valid_with_valid_schedule(service):
s = Schedule('epsg:27700', [service])
assert s.is_valid_schedule()
def test_generate_validation_report_delegates_to_method_in_schedule_operations(mocker, schedule):
mocker.patch.object(schedule_validation, 'generate_validation_report')
schedule.generate_validation_report()
schedule_validation.generate_validation_report.assert_called_once()
def test_build_graph_builds_correct_graph(strongly_connected_schedule):
g = strongly_connected_schedule.graph()
assert_semantically_equal(dict(g.nodes(data=True)),
{'5': {'services': {'service'}, 'routes': {'2'}, 'id': '5', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_5'},
'2': {'services': {'service'}, 'routes': {'1', '2'}, 'id': '2', 'x': 1.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.766825803756994, 'lon': -7.557148039524952,
's2_id': 5205973754090365183, 'additional_attributes': set(), 'name': 'Stop_2'},
'7': {'services': {'service'}, 'routes': {'2'}, 'id': '7', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_7'},
'8': {'services': {'service'}, 'routes': {'2'}, 'id': '8', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_8'},
'3': {'services': {'service'}, 'routes': {'1'}, 'id': '3', 'x': 3.0, 'y': 3.0,
'epsg': 'epsg:27700', 'lat': 49.76683608549253, 'lon': -7.557121424907424,
's2_id': 5205973754090203369, 'additional_attributes': set(), 'name': 'Stop_3'},
'1': {'services': {'service'}, 'routes': {'1'}, 'id': '1', 'x': 4.0, 'y': 2.0,
'epsg': 'epsg:27700', 'lat': 49.76682779861249, 'lon': -7.557106577683727,
's2_id': 5205973754090531959, 'additional_attributes': set(), 'name': 'Stop_1'},
'4': {'services': {'service'}, 'routes': {'1'}, 'id': '4', 'x': 7.0, 'y': 5.0,
'epsg': 'epsg:27700', 'lat': 49.766856648946295, 'lon': -7.5570681956375,
's2_id': 5205973754097123809, 'additional_attributes': set(), 'name': 'Stop_4'}})
assert_semantically_equal(g.edges(data=True)._adjdict,
{'5': {'2': {'services': {'service'}, 'routes': {'2'}}},
'2': {'7': {'services': {'service'}, 'routes': {'2'}},
'3': {'services': {'service'}, 'routes': {'1'}}},
'7': {'8': {'services': {'service'}, 'routes': {'2'}}},
'8': {'5': {'services': {'service'}, 'routes': {'2'}}},
'4': {'1': {'services': {'service'}, 'routes': {'1'}}},
'1': {'2': {'services': {'service'}, 'routes': {'1'}}},
'3': {'4': {'services': {'service'}, 'routes': {'1'}}}})
def test_building_trips_dataframe(schedule):
df = schedule.route_trips_with_stops_to_dataframe()
correct_df = DataFrame({'departure_time': {0: Timestamp('1970-01-01 13:00:00'), 1: | Timestamp('1970-01-01 13:05:00') | pandas.Timestamp |
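# --- Illustrative sketch (not part of the test module above) ----------------
# A minimal, self-contained example of the Schedule API that the tests above
# exercise: two Stops wrapped in a Route, a Service and a Schedule. The import
# path is an assumption based on the class names used in the tests; adjust it
# to match your installation.
from genet.schedule_elements import Schedule, Service, Route, Stop  # assumed path


def build_minimal_schedule():
    stops = [Stop(id='A', x=1, y=2, epsg='epsg:27700'),
             Stop(id='B', x=2, y=2, epsg='epsg:27700')]
    route = Route(id='r1', route_short_name='r', mode='bus', stops=stops,
                  trips={'trip_id': ['t1'],
                         'trip_departure_time': ['10:00:00'],
                         'vehicle_id': ['veh_1_bus']},
                  arrival_offsets=['00:00:00', '00:02:00'],
                  departure_offsets=['00:00:00', '00:02:00'])
    return Schedule(epsg='epsg:27700', services=[Service(id='s1', routes=[route])])


# Based on the assertions above, build_minimal_schedule() returns a schedule s
# with len(s) == 1, s.number_of_routes() == 1 and set(s.route_ids()) == {'r1'}.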
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import copy
import pathlib
import pandas as pd
import numpy as np
from .order import Order
"""
Position module
"""
"""
current state of position
a typical example is: {
  <instrument_id>: {
    'count': <how many days the security has been held>,
    'amount': <the amount of the security>,
    'price': <the close price of the security on the last trading day>,
    'weight': <the weight of the security in the total position value>,
},
}
"""
class Position:
"""Position"""
def __init__(self, cash=0, position_dict={}, today_account_value=0):
# NOTE: The position dict must be copied!!!
        # Otherwise the initial value of `position_dict` would be shared
        # between Position instances and mutated in place.
self.init_cash = cash
self.position = position_dict.copy()
self.position["cash"] = cash
self.position["today_account_value"] = today_account_value
def init_stock(self, stock_id, amount, price=None):
self.position[stock_id] = {}
        self.position[stock_id]["count"] = 0  # update count at the end of this date
        self.position[stock_id]["amount"] = amount
        self.position[stock_id]["price"] = price
        self.position[stock_id]["weight"] = 0  # update the weight at the end of the trade date
def buy_stock(self, stock_id, trade_val, cost, trade_price):
trade_amount = trade_val / trade_price
if stock_id not in self.position:
self.init_stock(stock_id=stock_id, amount=trade_amount, price=trade_price)
else:
# exist, add amount
self.position[stock_id]["amount"] += trade_amount
self.position["cash"] -= trade_val + cost
def sell_stock(self, stock_id, trade_val, cost, trade_price):
trade_amount = trade_val / trade_price
if stock_id not in self.position:
raise KeyError("{} not in current position".format(stock_id))
else:
# decrease the amount of stock
self.position[stock_id]["amount"] -= trade_amount
            # check whether the position should be deleted
if self.position[stock_id]["amount"] < -1e-5:
raise ValueError(
"only have {} {}, require {}".format(self.position[stock_id]["amount"], stock_id, trade_amount)
)
elif abs(self.position[stock_id]["amount"]) <= 1e-5:
self.del_stock(stock_id)
self.position["cash"] += trade_val - cost
def del_stock(self, stock_id):
del self.position[stock_id]
def update_order(self, order, trade_val, cost, trade_price):
        # handle order; `order` is an Order instance, defined in order.py
if order.direction == Order.BUY:
# BUY
self.buy_stock(order.stock_id, trade_val, cost, trade_price)
elif order.direction == Order.SELL:
# SELL
self.sell_stock(order.stock_id, trade_val, cost, trade_price)
else:
raise NotImplementedError("do not suppotr order direction {}".format(order.direction))
def update_stock_price(self, stock_id, price):
self.position[stock_id]["price"] = price
def update_stock_count(self, stock_id, count):
self.position[stock_id]["count"] = count
def update_stock_weight(self, stock_id, weight):
self.position[stock_id]["weight"] = weight
def update_cash(self, cash):
self.position["cash"] = cash
def calculate_stock_value(self):
stock_list = self.get_stock_list()
value = 0
for stock_id in stock_list:
value += self.position[stock_id]["amount"] * self.position[stock_id]["price"]
return value
def calculate_value(self):
value = self.calculate_stock_value()
value += self.position["cash"]
return value
def get_stock_list(self):
stock_list = list(set(self.position.keys()) - {"cash", "today_account_value"})
return stock_list
def get_stock_price(self, code):
return self.position[code]["price"]
def get_stock_amount(self, code):
return self.position[code]["amount"]
def get_stock_count(self, code):
return self.position[code]["count"]
def get_stock_weight(self, code):
return self.position[code]["weight"]
def get_cash(self):
return self.position["cash"]
def get_stock_amount_dict(self):
"""generate stock amount dict {stock_id : amount of stock}"""
d = {}
stock_list = self.get_stock_list()
for stock_code in stock_list:
d[stock_code] = self.get_stock_amount(code=stock_code)
return d
def get_stock_weight_dict(self, only_stock=False):
"""get_stock_weight_dict
        generate stock weight dict {stock_id : value weight of stock in the position}
        it is only meaningful at the beginning or the end of each trade date
        :param only_stock: If only_stock=True, the weight of each stock in the total stock value will be returned
                           If only_stock=False, the weight of each stock in total assets (stock + cash) will be returned
"""
if only_stock:
position_value = self.calculate_stock_value()
else:
position_value = self.calculate_value()
d = {}
stock_list = self.get_stock_list()
for stock_code in stock_list:
d[stock_code] = self.position[stock_code]["amount"] * self.position[stock_code]["price"] / position_value
return d
def add_count_all(self):
stock_list = self.get_stock_list()
for code in stock_list:
self.position[code]["count"] += 1
def update_weight_all(self):
weight_dict = self.get_stock_weight_dict()
for stock_code, weight in weight_dict.items():
self.update_stock_weight(stock_code, weight)
def save_position(self, path, last_trade_date):
path = pathlib.Path(path)
p = copy.deepcopy(self.position)
cash = pd.Series(dtype=float)
cash["init_cash"] = self.init_cash
cash["cash"] = p["cash"]
cash["today_account_value"] = p["today_account_value"]
cash["last_trade_date"] = str(last_trade_date.date()) if last_trade_date else None
del p["cash"]
del p["today_account_value"]
positions = | pd.DataFrame.from_dict(p, orient="index") | pandas.DataFrame.from_dict |
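# --- Illustrative sketch (not part of the module above) ---------------------
# A minimal usage example of the Position class defined above; the instrument
# id and the numbers are hypothetical. It shows the buy/sell round trip and the
# weight bookkeeping described in the docstrings.
def _example_position_round_trip():
    pos = Position(cash=10000)
    # Buy 5000 of value at price 10.0 -> 500 shares; cash drops by 5000 + cost.
    pos.buy_stock("SH600000", trade_val=5000, cost=5, trade_price=10.0)
    pos.update_stock_price("SH600000", price=11.0)
    total = pos.calculate_value()          # 4995 cash + 500 * 11.0 = 10495
    weights = pos.get_stock_weight_dict()  # {"SH600000": 5500 / 10495}
    # Selling the whole holding removes the instrument from the position.
    pos.sell_stock("SH600000", trade_val=5500, cost=5, trade_price=11.0)
    return total, weights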
import numpy as np
from scipy.special import expit as sigmoid
import numpyro.handlers as numpyro
import pandas as pd
import pytest
import torch
from jax import random
import pyro.poutine as poutine
from brmp import define_model, brm, makedesc
from brmp.backend import data_from_numpy
from brmp.design import (Categorical, CategoricalCoding, Integral,
NumericCoding, RealValued, code_lengths, code_terms,
coef_names, dummy_df, make_column_lookup, makedata,
metadata_from_cols, metadata_from_df)
from brmp.family import (LKJ, Bernoulli, Binomial, HalfCauchy, HalfNormal,
Normal, StudentT, Poisson)
from brmp.fit import Samples
from brmp.formula import Formula, OrderedSet, Term, _1, allfactors, parse
from brmp.model import parameters, scalar_parameter_map, scalar_parameter_names
from brmp.model_pre import build_model_pre
from brmp.numpyro_backend import backend as numpyro_backend
from brmp.priors import Prior, build_prior_tree
from brmp.pyro_backend import backend as pyro_backend
from pyro.distributions import Independent
def assert_equal(a, b):
assert type(a) == np.ndarray or type(a) == torch.Tensor
assert type(a) == type(b)
if type(a) == np.ndarray:
assert (a == b).all()
else:
assert torch.equal(a, b)
default_params = dict(
Normal=dict(loc=0., scale=1.),
Cauchy=dict(loc=0., scale=1.),
HalfCauchy=dict(scale=3.),
HalfNormal=dict(scale=1.),
LKJ=dict(eta=1.),
Beta=dict(concentration1=1., concentration0=1.),
StudentT=dict(df=3., loc=0., scale=1.),
)
# Makes a list of column metadata that includes an entry for every
# factor in `formula`. Any column not already in `cols` is assumed to
# be `RealValued`.
def expand_columns(formula, cols):
lookup = make_column_lookup(cols)
return [lookup.get(factor, RealValued(factor))
for factor in allfactors(formula)]
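# Each entry in `codegen_cases` below is a tuple of
#   (formula_str, non_real_cols, contrasts, family, priors, expected),
# where `expected` lists the sample sites the generated model is checked
# against as (site_name, prior_family_name, params) triples; an empty params
# dict means the site is compared against `default_params` for that family.
# (See the pyro/numpyro codegen tests below, which consume the fields in
# exactly this order.)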
codegen_cases = [
# TODO: This (and similar examples below) can't be expressed with
# the current parser. Is it useful to fix this (`y ~ -1`?), or can
# these be dropped?
# (Formula('y', [], []), [], [], ['sigma']),
('y ~ 1 + x', [], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# Integer valued predictor.
('y ~ 1 + x', [Integral('x', min=0, max=10)], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2', [], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ x1:x2',
[Categorical('x1', list('ab')), Categorical('x2', list('cd'))],
{}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# (Formula('y', [], [Group([], 'z', True)]), [Categorical('z', list('ab'))], [], ['sigma', 'z_1']),
# Groups with fewer than two terms don't sample the (Cholesky
# decomp. of the) correlation matrix.
# (Formula('y', [], [Group([], 'z', True)]), [Categorical('z', list('ab'))], [], ['sigma', 'z_1']),
('y ~ 1 | z', [Categorical('z', list('ab'))], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
# Integers as categorical levels.
('y ~ 1 | z', [Categorical('z', [10, 20])], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ x | z', [Categorical('z', list('ab'))], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ x | z',
[Categorical('x', list('ab')), Categorical('z', list('ab'))],
{}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 | z)', [Categorical('z', list('ab'))], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 || z)', [Categorical('z', list('ab'))], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 + x4 | z1) + (1 + x5 | z2)',
[Categorical('z1', list('ab')), Categorical('z2', list('ab'))],
{},
Normal,
[],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {}),
('z_1', 'Normal', {}),
('sd_1_0', 'HalfCauchy', {}),
('L_1', 'LKJ', {})]),
('y ~ 1 | a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
# Custom priors.
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b',), Normal(0., 100.))],
[('b_0', 'Normal', {'loc': 0., 'scale': 100.}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b', 'intercept'), Normal(0., 100.))],
[('b_0', 'Normal', {'loc': 0., 'scale': 100.}),
('b_1', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b', 'x1'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1',
[], {},
Normal,
[Prior(('b',), StudentT(3., 0., 1.))],
[('b_0', 'StudentT', {}),
('sigma', 'HalfCauchy', {})]),
# Prior on coef of a factor.
('y ~ 1 + x',
[Categorical('x', list('ab'))],
{},
Normal,
[Prior(('b', 'x[b]'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('sigma', 'HalfCauchy', {})]),
# Prior on coef of an interaction.
('y ~ x1:x2',
[Categorical('x1', list('ab')), Categorical('x2', list('cd'))],
{},
Normal,
[Prior(('b', 'x1[b]:x2[c]'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# Prior on group level `sd` choice.
('y ~ 1 + x2 + x3 | x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('sd', 'x1', 'intercept'), HalfCauchy(4.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfCauchy', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x2 + x3 || x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('sd', 'x1', 'intercept'), HalfNormal(4.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfNormal', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('z_0', 'Normal', {})]),
('y ~ 1 + x || a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[Prior(('sd', 'a:b', 'intercept'), HalfNormal(4.))],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfNormal', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {})]),
# Prior on L.
('y ~ 1 + x2 | x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('cor',), LKJ(2.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('L_0', 'LKJ', {'eta': 2.})]),
('y ~ 1 + x | a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[Prior(('cor', 'a:b'), LKJ(2.))],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {'eta': 2.})]),
# Prior on parameter of response distribution.
('y ~ x',
[],
{},
Normal,
[Prior(('resp', 'sigma'), HalfCauchy(4.))],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {'scale': 4.})]),
# Custom response family.
('y ~ x',
[],
{},
Normal(sigma=0.5),
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Categorical('y', list('AB'))],
{},
Bernoulli,
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Integral('y', min=0, max=1)],
{},
Bernoulli,
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Integral('y', min=0, max=10)],
{},
Binomial(num_trials=10),
[],
[('b_0', 'Cauchy', {})]),
('y ~ 1 + x',
[Integral('y', min=0, max=10), Integral('x', min=0, max=10)],
{},
Poisson,
[],
[('b_0', 'Cauchy', {})]),
# Contrasts
('y ~ a',
[Categorical('a', ['a1', 'a2'])],
{'a': np.array([[-1, -1, -1], [1, 1, 1]])},
Normal,
[Prior(('b', 'a[custom.1]'), Normal(0., 1.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ a + (a | b)',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{'a': np.array([[-1, -1, -1], [1, 1, 1]])},
Normal, [
Prior(('b', 'a[custom.1]'), Normal(0., 1.)),
Prior(('sd', 'b', 'a[custom.0]'), HalfCauchy(4.))
],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {}),
('b_2', 'Cauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('L_0', 'LKJ', {}),
('sigma', 'HalfCauchy', {})]),
]
# Map generic family names to backend specific names.
def pyro_family_name(name):
return dict(LKJ='LKJCorrCholesky').get(name, name)
def numpyro_family_name(name):
return dict(LKJ='LKJCholesky',
Bernoulli='BernoulliProbs',
Binomial='BinomialProbs').get(name, name)
@pytest.mark.parametrize('N', [1, 5])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_pyro_codegen(N, formula_str, non_real_cols, contrasts, family, priors, expected):
# Make dummy data.
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
# Generate the model from the column information rather than from
# the metadata extracted from `df`. Since N is small, the metadata
    # extracted from `df` might lose information compared to the full
# metadata derived from `cols` (e.g. levels of a categorical
# column) leading to unexpected results. e.g. Missing levels might
    # cause correlations not to be modelled, even though they ought
# to be given the full metadata.
metadata = metadata_from_cols(cols)
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
# Generate model function and data.
modelfn = pyro_backend.gen(desc).fn
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(pyro_backend, makedata(formula, df, metadata, contrasts))
trace = poutine.trace(modelfn).get_trace(**data)
# Check that y is correctly observed.
y_node = trace.nodes['y']
assert y_node['is_observed']
assert type(y_node['fn']).__name__ == family.name
assert_equal(y_node['value'], data['y_obs'])
# Check sample sites.
expected_sites = [site for (site, _, _) in expected]
assert set(trace.stochastic_nodes) - {'obs'} == set(expected_sites)
for (site, family_name, maybe_params) in expected:
fn = unwrapfn(trace.nodes[site]['fn'])
params = maybe_params or default_params[family_name]
assert type(fn).__name__ == pyro_family_name(family_name)
for (name, expected_val) in params.items():
val = fn.__getattribute__(name)
assert_equal(val, torch.tensor(expected_val).expand(val.shape))
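# Unwrap nested pyro `Independent` distributions (typically introduced via
# `.to_event()`) so the codegen test above can inspect the type and parameters
# of the underlying base distribution.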
def unwrapfn(fn):
return unwrapfn(fn.base_dist) if type(fn) == Independent else fn
@pytest.mark.parametrize('N', [1, 5])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_numpyro_codegen(N, formula_str, non_real_cols, contrasts, family, priors, expected):
# Make dummy data.
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
# Generate model function and data.
modelfn = numpyro_backend.gen(desc).fn
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(numpyro_backend, makedata(formula, df, metadata, contrasts))
rng = random.PRNGKey(0)
trace = numpyro.trace(numpyro.seed(modelfn, rng)).get_trace(**data)
# Check that y is correctly observed.
y_node = trace['y']
assert y_node['is_observed']
assert type(y_node['fn']).__name__ == numpyro_family_name(family.name)
assert_equal(y_node['value'], data['y_obs'])
# Check sample sites.
expected_sites = [site for (site, _, _) in expected]
sample_sites = [name for name, node in trace.items() if not node['is_observed']]
assert set(sample_sites) == set(expected_sites)
for (site, family_name, maybe_params) in expected:
fn = trace[site]['fn']
params = maybe_params or default_params[family_name]
assert type(fn).__name__ == numpyro_family_name(family_name)
for (name, expected_val) in params.items():
if family_name == 'LKJ':
assert name == 'eta'
name = 'concentration'
val = fn.__getattribute__(name)
assert_equal(val._value, np.broadcast_to(expected_val, val.shape))
@pytest.mark.parametrize('formula_str, cols, expected', [
('y ~ 1 + x',
[],
lambda df, coef: coef('b_intercept') + df['x'] * coef('b_x')),
('y ~ a',
[Categorical('a', ['a0', 'a1', 'a2'])],
lambda df, coef: ((df['a'] == 'a0') * coef('b_a[a0]') +
(df['a'] == 'a1') * coef('b_a[a1]') +
(df['a'] == 'a2') * coef('b_a[a2]'))),
('y ~ 1 + a',
[Categorical('a', ['a0', 'a1', 'a2'])],
lambda df, coef: (coef('b_intercept') +
(df['a'] == 'a1') * coef('b_a[a1]') +
(df['a'] == 'a2') * coef('b_a[a2]'))),
('y ~ x1:x2',
[],
lambda df, coef: df['x1'] * df['x2'] * coef('b_x1:x2')),
('y ~ a:x',
[Categorical('a', ['a0', 'a1'])],
lambda df, coef: (((df['a'] == 'a0') * df['x'] * coef('b_a[a0]:x')) +
((df['a'] == 'a1') * df['x'] * coef('b_a[a1]:x')))),
('y ~ 1 + x | a',
[Categorical('a', ['a0', 'a1'])],
lambda df, coef: ((df['a'] == 'a0') * (coef('r_a[a0,intercept]') + df['x'] * coef('r_a[a0,x]')) +
(df['a'] == 'a1') * (coef('r_a[a1,intercept]') + df['x'] * coef('r_a[a1,x]')))),
('y ~ 1 + x | a:b',
[Categorical('a', ['a0', 'a1']), Categorical('b', ['b0', 'b1'])],
lambda df, coef: (((df['a'] == 'a0') & (df['b'] == 'b0')) *
(coef('r_a:b[a0_b0,intercept]') + df['x'] * coef('r_a:b[a0_b0,x]')) +
((df['a'] == 'a1') & (df['b'] == 'b0')) *
(coef('r_a:b[a1_b0,intercept]') + df['x'] * coef('r_a:b[a1_b0,x]')) +
((df['a'] == 'a0') & (df['b'] == 'b1')) *
(coef('r_a:b[a0_b1,intercept]') + df['x'] * coef('r_a:b[a0_b1,x]')) +
((df['a'] == 'a1') & (df['b'] == 'b1')) *
(coef('r_a:b[a1_b1,intercept]') + df['x'] * coef('r_a:b[a1_b1,x]')))),
('y ~ 1 + (x1 | a) + (x2 | b)',
[Categorical('a', ['a0', 'a1']), Categorical('b', ['b0', 'b1'])],
lambda df, coef: (coef('b_intercept') +
(df['a'] == 'a0') * df['x1'] * coef('r_a[a0,x1]') +
(df['a'] == 'a1') * df['x1'] * coef('r_a[a1,x1]') +
(df['b'] == 'b0') * df['x2'] * coef('r_b[b0,x2]') +
(df['b'] == 'b1') * df['x2'] * coef('r_b[b1,x2]'))),
])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
def test_mu_correctness(formula_str, cols, backend, expected):
df = dummy_df(expand_columns(parse(formula_str), cols), 10)
fit = brm(formula_str, df).prior(num_samples=1, backend=backend)
# Pick out the one (and only) sample drawn.
actual_mu = fit.fitted(what='linear')[0]
# `expected` is assumed to return a data frame.
expected_mu = expected(df, fit.get_scalar_param).to_numpy(np.float32)
assert np.allclose(actual_mu, expected_mu)
@pytest.mark.parametrize('cols, family, expected', [
([],
Normal,
lambda mu: mu),
([Integral('y', min=0, max=1)],
Bernoulli,
lambda mu: sigmoid(mu)),
([Integral('y', min=0, max=5)],
Binomial(num_trials=5),
lambda mu: sigmoid(mu) * 5),
([Integral('y', min=0, max=5)],
Poisson,
lambda mu: np.exp(mu)),
])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
def test_expectation_correctness(cols, family, expected, backend):
formula_str = 'y ~ 1 + x'
df = dummy_df(expand_columns(parse(formula_str), cols), 10)
fit = brm(formula_str, df, family=family).prior(num_samples=1, backend=backend)
actual_expectation = fit.fitted(what='expectation')[0]
# We assume (since it's tested elsewhere) that `mu` is computed
# correctly by `fitted`. So given that, we check that `fitted`
# computes the correct expectation.
expected_expectation = expected(fit.fitted('linear')[0])
assert np.allclose(actual_expectation, expected_expectation)
@pytest.mark.parametrize('N', [0, 5])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_sampling_from_prior_smoke(N, backend, formula_str, non_real_cols, contrasts, family, priors, expected):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols) # Use full metadata for same reason given in comment in codegen test.
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
model = backend.gen(desc)
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(backend, makedata(formula, df, metadata, contrasts))
samples = backend.prior(data, model, num_samples=10, seed=None)
assert type(samples) == Samples
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
@pytest.mark.parametrize('fitargs', [
dict(backend=pyro_backend, num_samples=1, algo='prior'),
dict(backend=numpyro_backend, num_samples=1, algo='prior'),
])
def test_parameter_shapes(formula_str, non_real_cols, contrasts, family, priors, expected, fitargs):
# Make dummy data.
N = 5
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
df = dummy_df(cols, N, allow_non_exhaustive=True)
# Define model, and generate a single posterior sample.
metadata = metadata_from_cols(cols)
model = define_model(formula_str, metadata, family, priors, contrasts).gen(fitargs['backend'])
data = model.encode(df)
fit = model.run_algo('prior', data, num_samples=1, seed=None)
num_chains = fitargs.get('num_chains', 1)
# Check parameter sizes.
for parameter in parameters(fit.model_desc):
expected_param_shape = parameter.shape
samples = fit.get_param(parameter.name)
# A single sample is collected by each chain for all cases.
assert samples.shape == (num_chains,) + expected_param_shape
samples_with_chain_dim = fit.get_param(parameter.name, True)
assert samples_with_chain_dim.shape == (num_chains, 1) + expected_param_shape
def test_scalar_param_map_consistency():
formula = parse('y ~ 1 + x1 + (1 + x2 + b | a) + (1 + x1 | a:b)')
non_real_cols = [
Categorical('a', ['a1', 'a2', 'a3']),
Categorical('b', ['b1', 'b2', 'b3']),
]
cols = expand_columns(formula, non_real_cols)
desc = makedesc(formula, metadata_from_cols(cols), Normal, [], {})
params = parameters(desc)
spmap = scalar_parameter_map(desc)
# Check that each entry in the map points to a unique parameter
# position.
param_and_indices_set = set(param_and_indices
for (_, param_and_indices) in spmap)
assert len(param_and_indices_set) == len(spmap)
# Ensure that we have enough entries in the map to cover all of
# the scalar parameters. (The L_i parameters have a funny status.
# We consider them to be parameters, but not scalar parameters.
# This is not planned, rather things just evolved this way. It
    # does make some sense though, since we usually look at R_i
# instead.)
num_scalar_params = sum(np.product(shape)
for name, shape in params
if not name.startswith('L_'))
assert num_scalar_params == len(spmap)
# Check that all indices are valid. (i.e. Within the shape of the
# parameter.)
for scalar_param_name, (param_name, indices) in spmap:
ss = [shape for (name, shape) in params if name == param_name]
assert len(ss) == 1
param_shape = ss[0]
assert len(indices) == len(param_shape)
assert all(i < s for (i, s) in zip(indices, param_shape))
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_scalar_parameter_names_smoke(formula_str, non_real_cols, contrasts, family, priors, expected):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
model = define_model(formula_str, metadata, family, priors, contrasts)
names = scalar_parameter_names(model.desc)
assert type(names) == list
@pytest.mark.parametrize('formula_str, non_real_cols, family, priors', [
('y ~ x', [], Bernoulli, []),
('y ~ x', [Integral('y', min=0, max=2)], Bernoulli, []),
('y ~ x', [Categorical('y', list('abc'))], Bernoulli, []),
('y ~ x', [Categorical('y', list('ab'))], Normal, []),
('y ~ x', [Integral('y', min=0, max=1)], Normal, []),
('y ~ x', [], Binomial(num_trials=1), []),
('y ~ x', [Integral('y', min=-1, max=1)], Binomial(num_trials=1), []),
('y ~ x',
[Integral('y', min=0, max=3)],
Binomial(num_trials=2),
[]),
('y ~ x', [Categorical('y', list('abc'))], Binomial(num_trials=1), []),
('y ~ x', [], Poisson, []),
])
def test_family_and_response_type_checks(formula_str, non_real_cols, family, priors):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
with pytest.raises(Exception, match='not compatible'):
build_model_pre(formula, metadata, family, {})
@pytest.mark.parametrize('formula_str, non_real_cols, family, priors, expected_error', [
('y ~ x',
[],
Normal,
[Prior(('resp', 'sigma'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ x1 | x2',
[Categorical('x2', list('ab'))],
Normal,
[Prior(('sd', 'x2'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ 1 + x1 | x2',
[Categorical('x2', list('ab'))],
Normal,
[Prior(('cor', 'x2'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ x',
[],
Normal,
[Prior(('b',), Bernoulli(.5))],
r'(?i)invalid prior'),
# This hasn't passed since I moved the family/response checks in
# to the pre-model. The problem is that the support of the
# Binomial response depends on its parameters which aren't fully
    # specified in this case, meaning that the family/response check
# can't happen, and the prior test that ought to flag that a prior
# is missing is never reached. It's not clear that a "prior
# missing" error is the most helpful error to raise for this case,
# and it's possible that having the family/response test suggest
# that extra parameters ought to be specified is a better idea.
# It's tricky to say though, since this case is a bit of a one
# off, so figuring out a good general solution is tricky. Since
    # it's not clear how best to proceed, I'll punt for now.
pytest.param(
'y ~ x',
[Integral('y', 0, 1)],
Binomial,
[],
r'(?i)prior missing', marks=pytest.mark.xfail),
])
def test_prior_checks(formula_str, non_real_cols, family, priors, expected_error):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
design_metadata = build_model_pre(formula, metadata, family, {})
with pytest.raises(Exception, match=expected_error):
build_prior_tree(design_metadata, priors)
@pytest.mark.parametrize('formula_str, df, metadata_cols, contrasts, expected', [
# (Formula('y', [], []),
# pd.DataFrame(dict(y=[1, 2, 3])),
# dict(X=torch.tensor([[],
# [],
# []]),
# y_obs=torch.tensor([1., 2., 3.]))),
('y ~ 1',
pd.DataFrame(dict(y=[1., 2., 3.])),
None,
{},
dict(X=np.array([[1.],
[1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[4.],
[5.],
[6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[1., 4.],
[1., 5.],
[1., 6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x + 1',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[1., 4.],
[1., 5.],
[1., 6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('AAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[1., 0.],
[0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('AAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[1., 0.],
[1., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x1 + x2',
pd.DataFrame(dict(y=[1., 2., 3.],
x1=pd.Categorical(list('AAB')),
x2=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1., 0., 0., 0.],
[1., 0., 1., 0.],
[0., 1., 0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1., 0., 0.],
[1., 1., 0.],
[1., 0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
# (Formula('y', [], [Group([], 'x', True)]),
# pd.DataFrame(dict(y=[1, 2, 3],
# x=pd.Categorical(list('ABC')))),
# dict(X=np.array([[],
# [],
# []]),
# y_obs=np.array([1., 2., 3.]),
# J_1=np.array([0, 1, 2]),
# Z_1=np.array([[],
# [],
# []]))),
('y ~ 1 + (1 + x1 | x2)',
pd.DataFrame(dict(y=[1., 2., 3.],
x1=pd.Categorical(list('AAB')),
x2=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1.],
[1.],
[1.]]),
y_obs=np.array([1., 2., 3.]),
J_0=np.array([0, 1, 2]),
Z_0=np.array([[1., 0.],
[1., 0.],
[1., 1.]]))),
    # This matches brms modulo 0- vs. 1-based indexing.
('y ~ 1 | a:b:c',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical([0, 0, 1]),
b=pd.Categorical([2, 1, 0]),
c=pd.Categorical([0, 1, 2]))),
None,
{},
dict(X=np.array([[], [], []]),
y_obs=np.array([1., 2., 3.]),
J_0=np.array([1, 0, 2]),
Z_0=np.array([[1.], [1.], [1.]]))),
# Interactions
# --------------------------------------------------
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# AC BC AD BD
dict(X=np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
('y ~ 1 + x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# 1 D BC BD
dict(X=np.array([[1., 0., 0., 0.],
[1., 0., 1., 0.],
[1., 1., 0., 0.],
[1., 1., 0., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
('y ~ 1 + x1 + x2 + x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# 1 B D BD
dict(X=np.array([[1., 0., 0., 0.],
[1., 1., 0., 0.],
[1., 0., 1., 0.],
[1., 1., 1., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-real
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 1., 2.]),
x2=np.array([-10., 0., 10., 20.]))),
None,
{},
dict(X=np.array([[-10.],
[0.],
[10.],
[40.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-int
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 1., 2.]),
x2=np.array([-10, 0, 10, 20]))),
None,
{},
dict(X=np.array([[-10.],
[0.],
[10.],
[40.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-categorical
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 3., 4.]),
x2=pd.Categorical(list('ABAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[0., 2.],
[3., 0.],
[0., 4.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# This example is taken from here:
# https://patsy.readthedocs.io/en/latest/R-comparison.html
('y ~ a:x + a:b',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
a=pd.Categorical(list('ABAB')),
b=pd.Categorical(list('CCDD')),
x=np.array([1., 2., 3., 4.]))),
None,
{},
dict(X=np.array([[1., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 2.],
[0., 0., 1., 0., 3., 0.],
[0., 0., 0., 1., 0., 4.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# Integer-valued Factors
# --------------------------------------------------
('y ~ x1 + x2',
pd.DataFrame(dict(y=[1, 2, 3],
x1=[4, 5, 6],
x2=[7., 8., 9.])),
None,
{},
dict(X=np.array([[4., 7.],
[5., 8.],
[6., 9.]]),
y_obs=np.array([1., 2., 3.]))),
# Categorical Response
# --------------------------------------------------
('y ~ x',
pd.DataFrame(dict(y=pd.Categorical(list('AAB')),
x=[1., 2., 3.])),
None,
{},
dict(X=np.array([[1.],
[2.],
[3.]]),
y_obs=np.array([0., 0., 1.]))),
# Contrasts
# --------------------------------------------------
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
None,
{'a': np.array([[-1], [1]])},
dict(X=np.array([[-1.],
[-1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
[RealValued('y'), Categorical('a', levels=['a0', 'a1', 'a2'])],
{'a': np.array([[0], [-1], [1]])},
dict(X=np.array([[-1.],
[-1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
None,
{'a': np.array([[-1, -2], [0, 1]])},
dict(X=np.array([[-1., -2.],
[-1., -2.],
[0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + a + b + a:b',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']),
b=pd.Categorical(['b1', 'b2', 'b2']))),
None,
{'a': np.array([[-1], [1]]), 'b': np.array([[2], [3]])},
dict(X=np.array([[1., -1., 2., -2.],
[1., -1., 3., -3.],
[1., 1., 3., 3.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + (a | b)',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']),
b= | pd.Categorical(['b1', 'b2', 'b2']) | pandas.Categorical |
import pandas as pd
import requests
import plotly.graph_objects as go
from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from .models import *
from .forms import CreateUserForm
from django.shortcuts import redirect
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
API_key = '<KEY>'
# https://medium.com/codex/alpha-vantage-an-introduction-to-a-highly-efficient-free-stock-api-6d17f4481bf
def get_monthly_data(symbol):
api_key = '<KEY>'
api_url = f'https://www.alphavantage.co/query?function=TIME_SERIES_MONTHLY_ADJUSTED&symbol={symbol}&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df[f'Monthly Adjusted Time Series']).T
df = df.rename(
columns={'1. open': 'open', '2. high': 'high', '3. low': 'low', '4. close': 'close', '5. volume': 'volume'})
for i in df.columns:
df[i] = df[i].astype(float)
df.index = pd.to_datetime(df.index)
df = df.iloc[::-1]
return df
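# Illustrative usage sketch (not part of the original views module): each of
# these Alpha Vantage helpers follows the same pattern -- fetch the JSON,
# transpose the time-series dict, rename the columns, cast to float and
# reverse into chronological order. 'AAPL' is an arbitrary example symbol,
# and a real API key must replace '<KEY>' above for the request to succeed.
def _demo_monthly_fetch(symbol='AAPL'):
    df = get_monthly_data(symbol)
    # Rows are oldest-first (df.iloc[::-1]), so tail(1) is the latest month.
    return df[['open', 'high', 'low', 'close']].tail(1)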
def get_weekly_data(symbol):
api_key = '<KEY>'
api_url = f'https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY_ADJUSTED&symbol={symbol}&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df[f'Weekly Adjusted Time Series']).T
df = df.rename(
columns={'1. open': 'open', '2. high': 'high', '3. low': 'low', '4. close': 'close', '5. volume': 'volume'})
for i in df.columns:
df[i] = df[i].astype(float)
df.index = pd.to_datetime(df.index)
df = df.iloc[::-1]
return df
def get_daily_data(symbol):
api_key = '<KEY>'
api_url = f'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df[f'Time Series (Daily)']).T
df = df.rename(
columns={'1. open': 'open', '2. high': 'high', '3. low': 'low', '4. close': 'close', '5. volume': 'volume'})
for i in df.columns:
df[i] = df[i].astype(float)
df.index = pd.to_datetime(df.index)
df = df.iloc[::-1]
return df
def recherche_par_mot_cle(mot_cle):
api_key = '<KEY>'
api_url = f'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={mot_cle}&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df['bestMatches'])
df = df.rename(
columns={'1. symbol': 'symbol',
'2. name': 'name',
'3. type': 'type',
'4. region': 'region',
'5. marketOpen': 'marketOpen',
'6. marketClose': 'marketClose',
'7. timezone': 'timezone',
'8. currency': 'currency',
'9. matchScore': 'matchScore'})
return df
def resultat_to_html(df):
html = ''
for index, row in df.iterrows():
html += f'<h1> <a href="/actions/{row["symbol"]}">{row["name"]} : {row["symbol"]} {row["region"]} {row["currency"]}</a><h1>\n'
return html
def get_plot(data):
figure = go.Figure(
data=[
go.Candlestick(
x=data.index,
open=data['open'],
high=data['high'],
low=data['low'],
close=data['close']
)
]
)
graph = figure.to_html()
return graph
def index(request): # http://127.0.0.1:8000
symbol = 'TSLA'
data = get_monthly_data(symbol)
graph = get_plot(data)
# ts = TimeSeries(key=API_key)
# data1 = ts.get_monthly_adjusted('AAPL')
# html = data.to_html()
# html = "<h1>Yooooooooooooo<h1>"
html = graph
return HttpResponse(html)
def home(request):
return render(request, 'home.html', {})
def aboutus(request):
return render(request, 'aboutus.html', {})
def ActionsDaily(request, action):
symbole = action.upper()
data = get_daily_data(symbole)
graph = get_plot(data)
return render(request, 'Actions.html', {'symbole': symbole, 'graph': graph})
def ActionsWeekly(request, action):
symbole = action.upper()
data = get_weekly_data(symbole)
graph = get_plot(data)
return render(request, 'Actions.html', {'symbole': symbole, 'graph': graph})
def ActionsMonthly(request, action):
symbole = action.upper()
data = get_monthly_data(symbole)
graph = get_plot(data)
return render(request, 'Actions.html', {'symbole': symbole, 'graph': graph})
def contact(request):
return render(request, 'contact.html', {})
def team(request):
return render(request, 'team.html', {})
def loginPage(request): # https://jsfiddle.net/ivanov11/dghm5cu7/
if request.user.is_authenticated:
return redirect('home')
else:
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('home')
else:
messages.info(request, 'Username or Password is incorrect')
context = {}
return render(request, 'login.html', context)
def logoutUser(request):
logout(request)
return redirect('login')
def registerPage(request): # https://jsfiddle.net/ivanov11/hzf0jxLg/
if request.user.is_authenticated:
return redirect('home')
else:
form = CreateUserForm()
if request.method == 'POST':
form = CreateUserForm(request.POST)
if form.is_valid():
form.save()
user = form.cleaned_data.get('username')
messages.success(request, 'Account was created for ' + user)
return redirect('login')
context = {'form': form}
return render(request, 'register.html', context)
def userPage(request):
products =Product.objects.all()
context = {'products':products}
return render(request, 'user.html', context)
def get_data_now(symbol):
api_key = '<KEY>'
api_url=f'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey={api_key}'
raw_df = requests.get(api_url).json()
df = pd.DataFrame(raw_df[f'Global Quote']).T
df = df.rename(
columns={'2. open': 'open', '3. high': 'high', '4. low': 'low', '5. price': 'price', '6. volume': 'volume'})
for i in df.columns:
df[i] = df[i].astype(float)
df.index = | pd.to_datetime(df.index) | pandas.to_datetime |
import pandas as pd
df1 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet1')
print(df1)
df2 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet2')
print(df2)
print(pd.merge(df1, df2))
df3 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet3')
print(df3)
print(pd.merge(df1, df3, on='编号'))
df4 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet4')
print(df4)
print(pd.merge(df4, df3, on='编号'))
df5 = pd.read_excel("table_join_exp.xlsx", sheet_name='Sheet5')
print(df5)
# inner join
print( | pd.merge(df5, df3, on='编号', how='inner') | pandas.merge |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Toolkit Authors
from abc import ABC
from pydtk.models import BaseModel, register_model
import numpy as np
import pandas as pd
import sys
import datetime
@register_model(priority=1)
class GenericCsvModel(BaseModel, ABC):
"""A generic model for a csv file."""
_content_type = 'text/csv'
_data_type = None # allow any data-type
_file_extensions = ['.csv']
_contents = '.*'
def __init__(self, **kwargs):
super(GenericCsvModel, self).__init__(**kwargs)
def _load(self, path, start_timestamp=None, end_timestamp=None, **kwargs):
"""Load a csv file.
Args:
path (str): path to a csv file
start_timestamp (float): timestamp to start loading (not supported)
end_timestamp (float): timestamp to end loading (not supported)
"""
if start_timestamp is not None and end_timestamp is not None:
raise ValueError('Specifying time-range to load is not supported in GenericCsvModel')
data = pd.read_csv(path, header=None).to_numpy()
self.data = data
def _save(self, path, **kwargs):
"""Save ndarray data to a csv file.
Args:
path (str): path to the output csv file
"""
data = pd.DataFrame(self.data)
data.to_csv(path, header=False, index=False)
@property
def timestamps(self):
"""Return timestamps as ndarray."""
        # this is a prototype
return self.data
def to_ndarray(self):
"""Return data as ndarray."""
return self.data
@classmethod
def generate_contents_meta(cls, path, content_key='content'):
"""Generate contents metadata.
Args:
path (str): File path
content_key (str): Key of content
Returns:
(dict): contents metadata
"""
# Load file
data = pd.read_csv(path)
columns = data.columns.tolist()
# Generate metadata
contents = {content_key: {'columns': columns, 'tags': ['csv']}}
return contents
@classmethod
def generate_timestamp_meta(cls, path):
"""Generate contents metadata.
Args:
path (str): File path
Returns:
(list): [start_timestamp, end_timestamp]
"""
raise NotImplementedError
@register_model(priority=2)
class CameraTimestampCsvModel(GenericCsvModel, ABC):
"""A model for a csv file containing camera timestamps."""
_contents = {'camera/.*': {'tags': ['.*']}}
_columns = ['timestamp']
def __init__(self, **kwargs):
super(GenericCsvModel, self).__init__(**kwargs)
def _load(self, path, start_timestamp=None, end_timestamp=None, **kwargs):
"""Load a csv file.
Args:
path (str): path to a csv file
start_timestamp (float): timestamp to start loading (not supported)
end_timestamp (float): timestamp to end loading (not supported)
"""
if start_timestamp is None:
start_timestamp = self.metadata.data['start_timestamp']
if end_timestamp is None:
end_timestamp = self.metadata.data['end_timestamp']
# load csv
super()._load(path=path, **kwargs)
# filter
start_msec, end_msec = start_timestamp * 1000, end_timestamp * 1000 # sec. -> msec.
data = self.data
data = data[np.logical_and(data[:, 0] >= start_msec, data[:, 0] <= end_msec), 0]
# Convert unit (msec. -> sec.)
# Note: CSV file timestamps in "Driving behavior DB" is recorded in msec.
data = data.astype(np.float) * pow(10, -3)
self.data = data
def to_ndarray(self):
"""Return data as ndarray."""
return self.data
@property
def timestamps(self):
"""Return timestamps as ndarray."""
return self.data
@register_model(priority=3)
class AnnotationCsvModel(GenericCsvModel, ABC):
"""A model for a csv file containing annotations."""
_contents = {'.*annotation': {'tags': ['.*']}}
_data_type = "annotation"
_columns = ['Record_ID', 'Annotator_ID', 'Risk_score', 'Subjective_risk_score',
'Scene_description', 'Risk_factor', 'Environmental_tag', 'Behavior_tag']
_nan_convert_map = {'Risk_factor': ''}
def __init__(self, **kwargs):
super(GenericCsvModel, self).__init__(**kwargs)
def _load(self, path, start_timestamp=None, end_timestamp=None, **kwargs):
"""Load a csv file.
Args:
path (str): path to a csv file
"""
data = | pd.read_csv(path) | pandas.read_csv |
import os
import collections
import pandas
import pandas as pd
import matplotlib, seaborn, numpy
from matplotlib import pyplot
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.model_selection import train_test_split
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDClassifier, LogisticRegression, LogisticRegressionCV
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import cross_val_score
import parse_bepipred
def count_aa(string):
total_counts = []
amino_acids = ['H', 'E', 'V', 'A', 'N', 'M', 'K', 'F', 'I', 'P', 'D', 'R', 'Y', 'T', 'S', 'W', 'G', 'C', 'L', 'Q']
for aa in amino_acids:
total_counts.append(string.lower().count(aa.lower()))
return total_counts
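# Quick illustrative check (hypothetical peptide string, not from the dataset):
# each position in the returned list is the count of the matching letter in
# `amino_acids`, so 'HHEV' gives 2 for 'H', 1 each for 'E' and 'V', 0 elsewhere.
def _demo_count_aa():
    return count_aa('HHEV')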
def get_vectorized_sequences(list_sequences):
vectorized_sequences = []
for num in range(len(list_sequences)):
sequence = list_sequences[num]
num_aa = count_aa(sequence)
#normalized_num_aa = [c/len(sequence) for c in num_aa]
#num_hydrophobic = []
#final_vector = normalized_num_aa# + [count_attribute(sequence, "charged")] + [count_attribute(sequence, "polar")] + [count_attribute(sequence, "nonpolar")]
#final_vector = [count_attribute(sequence, "charged")] + [count_attribute(sequence, "polar")] + [count_attribute(sequence, "nonpolar")]
vectorized_sequences.append(num_aa)
return vectorized_sequences
class Sequence():
def __init__(self, uniprot_id, start, end, sequence, num_hits):
self.uniprot_id = uniprot_id
self.start = start
self.end = end
self.sequence = sequence
self.num_hits = num_hits
class UniprotGroup():
def __init__(self, list_of_sequences):
self.list_of_sequences = list_of_sequences
sorted_seq = sorted(self.list_of_sequences, key=lambda sequence: sequence.start)
#print(sorted_seq)
i = 0
aa_sequence = ""
while i<len(list_of_sequences):
aa_sequence = aa_sequence + list_of_sequences[i].sequence
i = i+2
if (len(list_of_sequences) % 2 == 0):
index = int(list_of_sequences[-1].start) - int(list_of_sequences[-2].end)
aa_sequence = aa_sequence + list_of_sequences[-1].sequence[index:]
self.aa_sequence = aa_sequence
list_of_uniprot_ids = []
list_of_sequences = []
df = pandas.read_csv("{}/data/hits.binarized.fdr_0.15.w_metadata.csv".format(os.path.dirname(os.path.realpath(__file__))))
print(len(df.uniprot_accession))
for num in range(len(df.uniprot_accession)-4):
#for num in range(20000):
print(num)
#print(df.iloc[num])
uniprot_id = df.uniprot_accession[num]
sequence = df.sequence_aa[num]
start = df.peptide_position[num].split("-")[0]
end = df.peptide_position[num].split("-")[1]
list_of_uniprot_ids.append(uniprot_id)
num_hits = 0
for number in df.iloc[num][4:]:
num_hits = num_hits + int(number)
list_of_sequences.append(Sequence(uniprot_id, start, end, sequence, num_hits))
list_of_uniprot_ids = list(set(list_of_uniprot_ids))
list_of_uniprot_groups = []
for uniprot_id in list_of_uniprot_ids:
new_list_of_sequences = []
for seq in list_of_sequences:
if seq.uniprot_id == uniprot_id:
new_list_of_sequences.append(seq)
list_of_uniprot_groups.append(UniprotGroup(new_list_of_sequences))
summary_data = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import logging
import os
import math
import logging
import sys
import os
import random
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from global_variables import config as g
# ROOT_DIR = os.path.dirname(os.path.abspath("top_level_file.txt"))
ROOT_DIR = g.ROOT_DIR
raw_data_dir = g.raw_data_dir
processed_data_dir = g.processed_data_dir
raw_data_dir = g.raw_data_dir
FILL_NAN_WITH_ZERO = False
FILL_NAN_WITH_MEDIAN = False
FILL_NAN_WITH_MEAN = False
INTERPOLATE_NAN_VALUES = False
DELETE_NAN_ROWS = False
REPLACE_WITH_PREVIOUS_DAY = True
def import_and_merge_data(input_filepath:str=raw_data_dir, output_filepath:str=processed_data_dir):
logger = logging.getLogger("def import_and_merge_data")
da_prices = | pd.read_csv(input_filepath + "da_prices.csv") | pandas.read_csv |
import argparse
from pathlib import Path
import pandas as pd
def get_final_target_list(df: pd.DataFrame,
max_gdtts_min_threshold: float = 0.7,
gdtts_min_threshold: float = 0.4,
num_model_min_threshold: int = 50,
num_target: int = 100):
fil_df = df.query('GDT_TS >= @gdtts_min_threshold')
sel_df = fil_df.groupby('target').filter(
lambda x: x['GDT_TS'].max() >= max_gdtts_min_threshold
and len(x) > num_model_min_threshold)
target_sorted_by_hitnum = sel_df.groupby('target').head(1).sort_values(['3', '2', '1'], ascending=False)
final_target_list = list(target_sorted_by_hitnum[: num_target]['target'])
return final_target_list
def main():
parser = argparse.ArgumentParser()
parser.add_argument('sampling_csv', type=str,
help='csv of sampling models. e.g. ../../../score/dataset/dataset_sampling.csv')
parser.add_argument('--output_target_list_path', type=str,
help='output path of final targets. ',
default='../../../pisces/20210225/' +
'multidomain_target_100_coverage60_gdtts40_num_model_50_cullpdb_pc20_res2.0_R0.25.csv')
parser.add_argument('--pisces_multidomain_csv', type=str,
default='../../../pisces/20210225/multidomain_cullpdb_pc20_res2.0_R0.25.csv',
help='multi-domain target list of pisces.')
parser.add_argument('--pisces_multidomain_hitnum_csv', type=str,
default='../../../pisces/20210225/multidomain_hit_num_cullpdb_pc20_res2.0_R0.25.csv',
help='hit num csv of multi-domain target of pisces')
args = parser.parse_args()
# load tmscore
tmscore_df = pd.read_csv(args.sampling_csv)
# load target info
pisces_multidomain_df = pd.read_csv(args.pisces_multidomain_csv, index_col=0)
pisces_multidomain_df['target'] \
= [row['PDB_ID'] + '_' + row['Chain'] for _, row in pisces_multidomain_df.iterrows()]
pisces_hitnum_df = | pd.read_csv(args.pisces_multidomain_hitnum_csv, index_col=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Sun May 8 18:29:53 2016
@author: bmanubay
"""
# Check which molecules we have that appear in Chris's list
import pandas as pd
# read in ;-delimited csv of comp/mix counts created in thermomlcnts.py
a0 = pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/allcomp_counts_all.csv", sep=';')
a1 = | pd.read_csv("/home/bmanubay/.thermoml/tables/Ken/allcomp_counts_interesting.csv", sep=';') | pandas.read_csv |
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
# Find the filepath of all .res NRIXS files
resfilepath_list = [filepath for filepath in glob.glob('*/*.res')]
# Consistency dictionary: For saving space in df
consist_dict = {'ok': 'O', 'acceptable': 'A', 'concerning': 'S'}
# Initialize df structures to store all values from phox .ptl files in
all_fitParam_df = pd.DataFrame()
all_fitQuality_df = pd.DataFrame()
all_valsFromData_df = pd.DataFrame()
all_valsFromRefData_df = pd.DataFrame()
all_valsFromPDOS_df = pd.DataFrame()
# resfilepath = resfilepath_list[27]
for resfilepath in resfilepath_list:
filepath = re.findall('([A-Za-z0-9/_]+/)[A-Za-z0-9_]+.res',resfilepath)[0]
filename = re.findall('/([A-Za-z0-9_]+).res',resfilepath)[0]
ptlfilepath = filepath+'Output/'+filename+'_phox_ptl.txt'
psthfilepath = filepath+'Output/'+filename+'_psth_ptl.txt'
folder = re.findall('([A-Za-z0-9/_]+)/',filepath)[0]
print(folder)
# Get date information from directory names
datetag = re.findall('([A-Za-z0-9]+)_',filepath)[0]
month = re.findall('[A-Za-z]+',datetag)[0]
year = re.findall('[0-9]+',datetag)[0]
# Initialize df structure to store values from phox .ptl file in
fitParam_df = | pd.DataFrame({'Date': [month+' '+year], 'Folder': [folder], 'Index': [filename]}) | pandas.DataFrame |
#!/usr/bin/env python3
import os
import argparse
import pdb
import pickle
import itertools
import pandas as pd
import numpy as np
import scoreblock as sb
import remtools as rt
def predict_scores(std=None, model=None):
"""use a trained classifier (model) to predict scores
Each model can have multiple classifiers (OVO/OVR/LDA/QDA etc..)
input
------
std : StagedTrialData
Featurized data
model : dict
a rather disorganized dict, created by anl-trainmodels.py
returns
------
sb_stk : ScoreBlock
The predicted scores and the corresponding human scores (if they exist)
"""
nameT = std.trial
day = std.tagDict.get('day','xx')
genotype = std.tagDict.get('genotype','xx')
nameM = model['tagDict']['tag']
fmd = model['full_model']
classifier_names = list(fmd['classifiers'].keys())
# features to predict
X = std.features.data.T
# data transforms: standardize and pca
sc = fmd['sc']
pca = fmd['pca']
Xm = X*1.0
if sc is not None:
Xm = sc.transform(Xm)
if pca is not None:
Xm = pca.transform(Xm)
data = []
ndx = []
for nameC in classifier_names:
print('predicting:', nameT, nameM, nameC)
classifier = fmd['classifiers'][nameC]['cls']
data.append(classifier.predict(Xm))
d = dict(trial=nameT, M=nameM, classifier=nameC, genotype=genotype, day=day)
ndx.append(d)
# make a scoreblock (joining index and data) of predicted model scores
data = np.asarray(data)
cols_data = ['ep-%5.5i' % (i+1) for i in range(data.shape[1])]
df_data = pd.DataFrame(data=data, columns=cols_data)
df_ndx = | pd.DataFrame(data=ndx) | pandas.DataFrame |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import logging
import os
import pickle
import numpy as np
import pandas as pd
from sklearn.externals import joblib
import azureml.automl.core
from azureml.automl.core.shared import logging_utilities, log_server
from azureml.telemetry import INSTRUMENTATION_KEY
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.pandas_parameter_type import PandasParameterType
input_sample = pd.DataFrame({"age": pd.Series(["24"], dtype="int64"), "job": pd.Series(["technician"], dtype="object"), "marital": pd.Series(["single"], dtype="object"), "education": pd.Series(["university.degree"], dtype="object"), "default": pd.Series(["no"], dtype="object"), "housing": | pd.Series(["no"], dtype="object") | pandas.Series |
import numpy as np
import pandas as pd
from yeast.transformers import StrToLower, StrToUpper, StrToSentence, StrToTitle, StrTrim
from yeast.transformers import StrSlice, StrReplace, StrRemove, StrRemoveAll, MapValues
from yeast.transformers import DateYear, DateMonth, DateQuarter, DateWeek, DateDay, DateDayOfWeek
from yeast.transformers import DateHour, DateMinute, DateSecond, DateDayOfYear
from yeast.transformers import Round, Ceil, Floor
from data_samples import startrek_data as data
from data_samples import startrek_characters as chars_data
from data_samples import release_dates
def test_str_to_lower(data):
titles = StrToLower().resolve(data, column='title').to_list()
assert 'picard' in titles
assert 'tng' in titles
assert 'voyager' in titles
assert 'enterprise' in titles
assert 'deep space nine' in titles
assert 'discovery' in titles
def test_str_to_upper(data):
titles = StrToUpper('title').resolve(data).to_list()
assert 'PICARD' in titles
assert 'TNG' in titles
assert 'VOYAGER' in titles
assert 'ENTERPRISE' in titles
assert 'DEEP SPACE NINE' in titles
assert 'DISCOVERY' in titles
def test_str_to_sentence(chars_data):
titles = StrToSentence().resolve(chars_data, column='name').to_list()
assert '<NAME>' in titles
assert '<NAME>' in titles
def test_str_to_title(chars_data):
titles = StrToTitle('name').resolve(chars_data).to_list()
assert '<NAME>' in titles
assert '<NAME>' in titles
def test_str_trim(chars_data):
titles = StrTrim().resolve(chars_data, column='name').to_list()
assert 'Data' in titles
def test_str_slice(chars_data):
titles = StrSlice(start=0, stop=2).resolve(chars_data, column='name').to_list()
assert 'JO' in titles
def test_str_replace(chars_data):
titles = StrReplace('Michael', 'Mike').resolve(chars_data, column='name').to_list()
assert '<NAME>' in titles
def test_str_remove(chars_data):
titles = StrRemove('Michael ').resolve(chars_data, column='name').to_list()
assert 'Burnham' in titles
def test_str_remove_all(chars_data):
titles = StrRemoveAll('p').resolve(chars_data, column='name').to_list()
assert 'hilia georgiou' in titles
def test_str_map_values(chars_data):
ranks = MapValues({
'Capitain': 'Captain',
'CAPTAIN': 'Captain',
'Comander': 'Commander'
}).resolve(chars_data, column='rank').to_list()
assert 'Captain' in ranks
assert 'Capitain' not in ranks
assert 'CAPTAIN' not in ranks
assert 'Commander' in ranks
assert 'Comander' not in ranks
def test_numerical_map_values(data):
seasons = MapValues({
7: 8,
4: np.NaN
}).resolve(data, column='seasons').to_list()
assert seasons[0] == 1
assert seasons[1] == 8
assert seasons[2] == 8
assert np.isnan(seasons[3])
assert seasons[4] == 8
assert seasons[5] == 2
def test_extract_date_year(release_dates):
years = DateYear().resolve(release_dates, column='released').to_list()
# [1966, 1987, 1993, 1995, 2001, 2017, 2020, <NA>]
assert years[0] == 1966
assert years[1] == 1987
assert years[2] == 1993
assert years[3] == 1995
assert years[4] == 2001
assert years[5] == 2017
assert years[6] == 2020
assert pd.isna(years[7])
def test_extract_date_month(release_dates):
feature = DateMonth().resolve(release_dates, column='released').to_list()
# [9, 9, 1, 1, 9, 9, 1, <NA>]
assert feature[0] == 9
assert feature[1] == 9
assert feature[2] == 1
assert feature[3] == 1
assert feature[4] == 9
assert feature[5] == 9
assert feature[6] == 1
assert pd.isna(feature[7])
def test_extract_date_quarter(release_dates):
feature = DateQuarter().resolve(release_dates, column='released').to_list()
# [3, 3, 1, 1, 3, 3, 1, <NA>]
assert feature[0] == 3
assert feature[1] == 3
assert feature[2] == 1
assert feature[3] == 1
assert feature[4] == 3
assert feature[5] == 3
assert feature[6] == 1
assert pd.isna(feature[7])
def test_extract_date_week(release_dates):
feature = DateWeek().resolve(release_dates, column='released').to_list()
# [36, 40, 53, 3, 39, 38, 4, <NA>]
assert feature[0] == 36
assert feature[1] == 40
assert feature[2] == 53
assert feature[3] == 3
assert feature[4] == 39
assert feature[5] == 38
assert feature[6] == 4
assert pd.isna(feature[7])
def test_extract_date_day(release_dates):
feature = DateDay().resolve(release_dates, column='released').to_list()
# [8, 28, 3, 16, 26, 24, 23, <NA>]
assert feature[0] == 8
assert feature[1] == 28
assert feature[2] == 3
assert feature[3] == 16
assert feature[4] == 26
assert feature[5] == 24
assert feature[6] == 23
assert pd.isna(feature[7])
def test_extract_date_dow(release_dates):
feature = DateDayOfWeek().resolve(release_dates, column='released').to_list()
# [3, 0, 6, 0, 2, 6, 3, <NA>]
assert feature[0] == 3
assert feature[1] == 0
assert feature[2] == 6
assert feature[3] == 0
assert feature[4] == 2
assert feature[5] == 6
assert feature[6] == 3
assert pd.isna(feature[7])
def test_extract_date_doy(release_dates):
feature = DateDayOfYear().resolve(release_dates, column='released').to_list()
# [251, 271, 3, 16, 269, 267, 23, <NA>]
assert feature[0] == 251
assert feature[1] == 271
assert feature[2] == 3
assert feature[3] == 16
assert feature[4] == 269
assert feature[5] == 267
assert feature[6] == 23
assert pd.isna(feature[7])
def test_extract_date_hour(release_dates):
feature = DateHour().resolve(release_dates, column='released').to_list()
# [0, 0, 12, 0, 13, 0, 15, <NA>]
assert feature[0] == 0
assert feature[1] == 0
assert feature[2] == 12
assert feature[3] == 0
assert feature[4] == 13
assert feature[5] == 0
assert feature[6] == 15
assert pd.isna(feature[7])
def test_extract_date_minute(release_dates):
feature = DateMinute().resolve(release_dates, column='released').to_list()
# [0, 0, 15, 0, 53, 0, 0, <NA>]
assert feature[0] == 0
assert feature[1] == 0
assert feature[2] == 15
assert feature[3] == 0
assert feature[4] == 53
assert feature[5] == 0
assert feature[6] == 0
assert | pd.isna(feature[7]) | pandas.isna |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Task2GUI_mainFinal.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from pyqtgraph import PlotWidget
from Task2GUI_composerFinal import Ui_Form
from PyQt5 import QtCore, QtGui, QtWidgets
from Task2GUI_composerFinal import Ui_Form
import pyqtgraph.exporters
from fpdf import FPDF
import statistics
from pyqtgraph import PlotWidget
import pyqtgraph
from pyqtgraph import *
import pyqtgraph as pg
from pyqtgraph import PlotWidget, PlotItem
#from matplotlib.pyplot import draw
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft
import pandas as pd
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import QApplication, QColorDialog, QFileDialog, QFrame, QWidget, QInputDialog, QLineEdit,QComboBox
import os
import numpy as np
from PyQt5.QtWidgets import QMessageBox
import sys
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QColorDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QColor
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from pyqtgraph.graphicsItems.ScatterPlotItem import Symbols
from pyqtgraph.graphicsItems.ImageItem import ImageItem
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import io
from numpy.fft import fft, fftfreq, ifft
from scipy.fftpack import fft, ifft
from scipy import signal
import cmath
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 600)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setObjectName("widget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.widget_2 = QtWidgets.QWidget(self.widget)
self.widget_2.setObjectName("widget_2")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget_2)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.splitter = QtWidgets.QSplitter(self.widget_2)
self.splitter.setMinimumSize(QtCore.QSize(100, 0))
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.mainChannel = PlotWidget(self.splitter)
self.mainChannel.setObjectName("mainChannel")
self.secindaryChannel = PlotWidget(self.splitter)
self.secindaryChannel.setObjectName("secindaryChannel")
self.verticalLayout.addWidget(self.splitter)
self.freqSlider = QtWidgets.QSlider(self.widget_2)
font = QtGui.QFont()
font.setPointSize(8)
self.freqSlider.setFont(font)
self.freqSlider.setMaximum(3)
self.freqSlider.setOrientation(QtCore.Qt.Horizontal)
self.freqSlider.setTickPosition(QtWidgets.QSlider.TicksBelow)
self.freqSlider.setObjectName("freqSlider")
self.verticalLayout.addWidget(self.freqSlider)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.verticalLayout_2 = QtWidgets.QVBoxLayout()
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.mainLabel = QtWidgets.QLabel(self.widget_2)
font = QtGui.QFont()
font.setPointSize(10)
self.mainLabel.setFont(font)
self.mainLabel.setObjectName("mainLabel")
self.verticalLayout_2.addWidget(self.mainLabel)
self.secondaryLabel = QtWidgets.QLabel(self.widget_2)
font = QtGui.QFont()
font.setPointSize(10)
self.secondaryLabel.setFont(font)
self.secondaryLabel.setObjectName("secondaryLabel")
self.verticalLayout_2.addWidget(self.secondaryLabel)
self.horizontalLayout_3.addLayout(self.verticalLayout_2)
self.horizontalLayout_2.addWidget(self.widget_2)
self.horizontalLayout.addWidget(self.widget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
self.menubar.setObjectName("menubar")
self.menuFile = QtWidgets.QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuComposer = QtWidgets.QMenu(self.menubar)
self.menuComposer.setObjectName("menuComposer")
self.menuSignal_processes = QtWidgets.QMenu(self.menubar)
self.menuSignal_processes.setObjectName("menuSignal_processes")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionOpen_file = QtWidgets.QAction(MainWindow)
self.actionOpen_file.setObjectName("actionOpen_file")
self.actionExit = QtWidgets.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionOpen_composer = QtWidgets.QAction(MainWindow)
self.actionOpen_composer.setObjectName("actionOpen_composer")
self.actionSample = QtWidgets.QAction(MainWindow)
self.actionSample.setObjectName("actionSample")
self.actionReconstruct = QtWidgets.QAction(MainWindow)
self.actionReconstruct.setObjectName("actionReconstruct")
self.actionShow_2nd_Ch = QtWidgets.QAction(MainWindow)
self.actionShow_2nd_Ch.setObjectName("actionShow_2nd_Ch")
self.actionHide_2nd_Ch = QtWidgets.QAction(MainWindow)
self.actionHide_2nd_Ch.setObjectName("actionHide_2nd_Ch")
self.menuFile.addAction(self.actionOpen_file)
self.menuFile.addAction(self.actionExit)
self.menuComposer.addAction(self.actionOpen_composer)
self.menuSignal_processes.addAction(self.actionSample)
self.menuSignal_processes.addAction(self.actionReconstruct)
self.menuSignal_processes.addAction(self.actionShow_2nd_Ch)
self.menuSignal_processes.addAction(self.actionHide_2nd_Ch)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuComposer.menuAction())
self.menubar.addAction(self.menuSignal_processes.menuAction())
self.actionOpen_composer.triggered.connect(lambda:self.openSecond())
self.timer1 = QtCore.QTimer()
self.time1=0
self.amp1=0
self.ampArray=0
self.timeSample=0
self.numSamples=0
self.samplingInterval=0
self.Fsample=0
self.color = "#ffaa00"
self.timerInterval = 1
self.coeffSample=0
self.mainChannel.setXRange(0, 2, padding=0)
self.mainChannel.setLimits(xMin=0)
self.mainChannel.setLimits(xMax=20)
self.mainChannel.setLimits(yMin=-20)
self.mainChannel.setLimits(yMax=20)
self.array1=0
self.array2=0
self.array3=0
self.secindaryChannel.setXRange(0, 2, padding=0)
self.secindaryChannel.setLimits(xMin=0)
self.secindaryChannel.setLimits(xMax=62)
self.secindaryChannel.setLimits(yMin=-20)
self.secindaryChannel.setLimits(yMax=20)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.mainLabel.setText(_translate("MainWindow", "Main Channel"))
self.secondaryLabel.setText(_translate("MainWindow", "Secondary Channel"))
self.menuFile.setTitle(_translate("MainWindow", "File"))
self.menuComposer.setTitle(_translate("MainWindow", "Composer"))
self.menuSignal_processes.setTitle(_translate("MainWindow", "Signal processes"))
self.actionOpen_file.setText(_translate("MainWindow", "Open file"))
self.actionExit.setText(_translate("MainWindow", "Exit"))
self.actionExit.setShortcut(_translate("MainWindow", "esc"))
self.actionOpen_composer.setText(_translate("MainWindow", "Open composer"))
self.actionSample.setText(_translate("MainWindow", "Sample"))
self.actionReconstruct.setText(_translate("MainWindow", "Reconstruct"))
self.actionShow_2nd_Ch.setText(_translate("MainWindow", "Show 2nd Ch"))
self.actionHide_2nd_Ch.setText(_translate("MainWindow", "Hide 2nd Ch"))
self.actionExit.triggered.connect(lambda: self.exitApp())
self.actionOpen_file.triggered.connect(lambda: self.openFile())
self.actionSample.triggered.connect(lambda: self.signalSample(self.time1,self.amp1,self.coeffSample))
self.freqSlider.valueChanged.connect(lambda: self.signalSample(self.time1,self.amp1,self.freqSlider.value()))
self.actionHide_2nd_Ch.triggered.connect(lambda: self.hideSecondChannel())
self.actionShow_2nd_Ch.triggered.connect(lambda: self.showSecondChannel())
self.actionReconstruct.triggered.connect(lambda: self.reConstruct(self.numSamples, self.samplingInterval, self.ampArray, self.timeSample))
def openFile(self):
"""opens a file from the brower """
file_path=QFileDialog.getOpenFileName()
self.file_name=file_path[0].split('/')[-1]
self.read_data(self.file_name)
def read_data(self,file_name):
"""loads the data from chosen file"""
global dataFile
dataFile= | pd.read_csv(file_name) | pandas.read_csv |
'''
Purpose : collect user info from Twitter by search query keywords.
Methods : use tweepy and api key from my developer accounts
Author : <NAME>
published : 2022-02-15
Remarks : still need to fix the rate limits
Generation of .exe file : pyinstaller --onefile scriptname.py
'''
import tkinter as tk
import tweepy
import time
from tweepy import OAuthHandler
import pandas as pd
import nltk
from nltk import word_tokenize, pos_tag, pos_tag_sents
import re
#nltk.download('punkt')
#nltk.download('averaged_perceptron_tagger')
import datetime
current_date = datetime.datetime.now()
# root window
root = tk.Tk()
root.geometry('500x300')
root.resizable(0,0)
root.title('Twitter Data GeneratorV-2.0')
def my_function():
#twitter credentials
consumer_key = "API KEY"
consumer_secret = "API KEY"
access_key = "API KEY"
access_secret = "API KEY"
# Pass your twitter credentials to tweepy via its OAuthHandler
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
api.wait_on_rate_limit = True
api.wait_on_rate_limit_notify = True
api = tweepy.API(auth, wait_on_rate_limit=True)
id = my_entry.get()
text_query = (id)#'managing editor'
max_tweets = 1000
# Creation of query method using parameters
tweets = tweepy.Cursor(api.search_users,q=text_query ,count=5,include_entities=True).items(max_tweets) #
# Pulling information from tweets iterable object
try:
# Add or remove tweet information you want in the below list comprehension
tweets_list = [[tweet.name,tweet.screen_name, tweet.id_str,tweet.location, tweet.url, tweet.description, tweet.verified, tweet.followers_count, tweet.friends_count, tweet.statuses_count, tweet.listed_count, tweet.created_at, tweet.profile_image_url_https, tweet.default_profile] for tweet in tweets]
# Creation of dataframe from tweets_list
except:
tweets_list = [[tweet.name,tweet.screen_name, tweet.id_str,tweet.location, tweet.url, tweet.description, tweet.verified, tweet.followers_count, tweet.friends_count, tweet.statuses_count, tweet.listed_count, tweet.created_at, tweet.profile_image_url_https, tweet.default_profile] for tweet in tweets]
time.sleep(10)
# Did not include column names to simplify code
tweets_df = | pd.DataFrame(tweets_list) | pandas.DataFrame |
from skimage import (
color, feature, filters, measure, morphology, segmentation, util
)
import os
import pandas as pd
import numpy as np
import skimage.color
from scipy.sparse import coo_matrix
from scipy.sparse import load_npz, save_npz
from skimage.measure import label, regionprops
import scanpy as sc
import matplotlib.pyplot as plt
from skimage.segmentation import watershed, expand_labels
from matplotlib.pyplot import rc_context
import matplotlib as mpl
import scanpy as sc
import numpy as np
from matplotlib.pyplot import rc_context
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import os
#import tangram as tg
import scanpy as sc
import pandas as pd
#import squidpy as sq
from math import ceil
# Show plots as part of the notebook
import io
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import numpy as np
import scanpy as sc
import pandas as pd
from sklearn.metrics.pairwise import euclidean_distances
def get_object_info(segementation_result):
region_proporties = measure.regionprops(segementation_result)
area = []
index = []
x = []
y = []
for i in range(len(measure.regionprops(segementation_result))):
centroid_intermediate = region_proporties[i].centroid
centroid_intermediate = list(centroid_intermediate)
area_intermediate = region_proporties[i].area
x.append(centroid_intermediate[1])
y.append(centroid_intermediate[0])
area.append(area_intermediate)
index.append(i) # create dataframe
cell_info = pd.DataFrame(index)
cell_info['x'] = x
cell_info['y'] = y
cell_info['area'] = area
return cell_info
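# Minimal sanity sketch (toy label image, not from the original pipeline):
# every labelled object in the segmentation yields one row holding its
# centroid (x = column, y = row) and its pixel area.
def _demo_get_object_info():
    toy_labels = np.array([[1, 1, 0, 0],
                           [1, 1, 0, 2],
                           [0, 0, 0, 2]])
    return get_object_info(toy_labels)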
def assign_spots_to_cells(segmentation_labels, spots):
from scipy import ndimage as ndi
spots1 = spots[["y", "x"]]
cell_labels = ndi.map_coordinates(
segmentation_labels,
spots1.T, # assuming spot coords has shape (n, 2)
order=0,
)
spots["cell"] = cell_labels
return spots
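# Note on the lookup above: with order=0, ndi.map_coordinates reads the nearest
# pixel of the label image at each (y, x) position, so every spot inherits the
# label of the segment it falls inside (0 means background / unassigned).
# A small sketch with made-up coordinates:
def _demo_assign_spots(segmentation_labels):
    spots = pd.DataFrame({'x': [1.0, 3.0], 'y': [0.0, 2.0]})
    return assign_spots_to_cells(segmentation_labels, spots)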
def Diff(li1, li2):
return list(set(li1) - set(li2)) + list(set(li2) - set(li1))
def create_anndata_obj(spots_file,
segmentation_mask,
output_file,
filter_data=True,
metric = 'distance',
write_h5ad = True,
value= 1.2,
convert_coords = True,
conversion_factor = 0.1625):
print('reading spots file')
spots = pd.read_csv(spots_file)
if filter_data==True:
spots_filtered = spots[spots[metric] < value]
else:
spots_filtered = spots
spots_filtered = spots_filtered[['target','xc','yc']]
if convert_coords == True:
spots_filtered['x'] = spots_filtered['xc']/conversion_factor
spots_filtered['y'] = spots_filtered['yc']/conversion_factor
else:
spots_filtered['x'] = spots_filtered['xc']
spots_filtered['y'] = spots_filtered['yc']
spots_filtered = spots_filtered.rename(columns = {'target':'Gene'})
spots_filtered = spots_filtered[['Gene','x','y']]
spots_filtered = spots_filtered.dropna()
coo = load_npz(segmentation_mask)
print('load coo file')
assinged = assign_spots_to_cells(coo.toarray(), spots_filtered)
cells = get_object_info(coo.toarray())
cells[0] = cells[0]+1
print('assign spots to cells')
assigned_filt = assinged[assinged.cell != 0]
hm = assinged.groupby(['Gene','cell']).size().unstack(fill_value=0)
hm = hm.drop(columns = 0)
an_sp = sc.AnnData(X=hm.T)
cells[0] = cells[0].astype(str)
cells_filt = cells[cells[0].isin(list(an_sp.obs.index))]
an_sp.obs = cells_filt
an_sp.obs = an_sp.obs.drop(columns = 0)
cells[0].astype(int)-1
if write_h5ad == True:
print('write h5ad')
an_sp.write_h5ad(output_file)
else:
print('not writing')
return an_sp
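# Hypothetical call sketch: the file names below are placeholders, while the
# keyword names all come from the signature above; write_h5ad=False returns
# the AnnData object without writing anything to disk.
def _demo_build_anndata(spots_csv='decoded.csv', mask_npz='stitched_mask.npz'):
    return create_anndata_obj(spots_csv, mask_npz, output_file='cells.h5ad',
                              filter_data=True, metric='distance', value=1.2,
                              write_h5ad=False)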
def recluster_specific_cluster(anndata,
to_cluster,
rerun_umap = False,
resolutions = [0.1,0.2,0.3,0.5]):
import scanpy as sc
to_cluster = [to_cluster]
anndata_int = anndata[anndata.obs.cell_type.isin(to_cluster)]
sc.pp.neighbors(anndata_int, n_neighbors=30, n_pcs=30)
if rerun_umap == True:
sc.tl.umap(anndata_int, min_dist=1)
for i in resolutions:
print('clustering at resolution: '+str(i))
plt.rcdefaults()
sc.tl.leiden(anndata_int, resolution =i, key_added = ("cell_type_" + str(i)))
plt.rcdefaults()
with rc_context({'figure.figsize': (10, 10), 'figure.dpi': 50}):
sc.pl.umap(anndata_int, color = ("cell_type_"+str(i)),s=30,legend_loc='on data',legend_fontsize=20,legend_fontoutline=10)
return anndata_int
def plot_umap(anndata,
color = 'cell_type',
compute_umap=False,
n_neighbors=30,
n_pcs=30,
min_dist=1,
fig_size = (10, 10),
fig_dpi = 50,
s=20,
legend_loc='on data',
legend_fontsize=15,
legend_fontoutline=10
):
if compute_umap == True:
sc.pp.neighbors(anndata, n_neighbors=n_neighbors, n_pcs=n_pcs)
sc.tl.umap(anndata, min_dist=min_dist)
plt.rcdefaults()
with rc_context({'figure.figsize': fig_size, 'figure.dpi': fig_dpi}):
sc.pl.umap(anndata, color = (color),s=s,legend_loc=legend_loc,legend_fontsize=legend_fontsize,legend_fontoutline=legend_fontoutline, frameon = False, title = ' ')
def plot_marker_genes(anndata,
cluster_label,
method='t-test',
key_added = "t-test",
n_genes=25, sharey=False, key = "t-test"):
sc.tl.rank_genes_groups(anndata, cluster_label, method=method, key_added = key_added)
plt.rcdefaults()
sc.pl.rank_genes_groups(anndata, n_genes=n_genes, sharey=sharey, key = key_added)
def plot_clusters(anndata,
clusters_to_map,
broad_cluster,
cluster_label_type = int,
key='t-test',
size = 0.5,
number_of_marker_genes = 10,
sample_id_column = 'sample_id',
dim_subplots = [3,3]
):
mpl.rcParams['text.color'] = 'w'
plt.style.use('dark_background')
clusters_to_map = clusters_to_map
cluster_class = {}
marker_genes = {}
for broad in sorted(list(anndata.obs[broad_cluster].unique())):
anndata_broad = anndata[anndata.obs[broad_cluster] == broad]
print(' ')
print(broad)
for cluster in sorted(list(anndata_broad.obs[clusters_to_map].unique().astype(cluster_label_type))):
print(cluster)
genes = list(sc.get.rank_genes_groups_df(anndata_broad,group=str(cluster), key=key)['names'].head(number_of_marker_genes))
print(*list(genes), sep = " ")
spatial_int = anndata_broad[anndata_broad.obs[clusters_to_map] == str(cluster)]
fig, axs = plt.subplots(dim_subplots[0],ceil(len(anndata_broad.obs[sample_id_column].unique())/dim_subplots[1]), figsize=(20, 10))
fig.subplots_adjust(hspace = .5, wspace=.001)
fig.suptitle('Cluster: '+str(cluster))
axs = axs.ravel()
for q, j in enumerate(sorted(list(anndata_broad.obs[sample_id_column].unique()))):
spatial_celltypes_tag_ = spatial_int[spatial_int.obs[sample_id_column]==j]
axs[q].plot((anndata[anndata.obs[sample_id_column] == j].obs.x), (anndata[anndata.obs[sample_id_column] == j].obs.y), marker='s', linestyle='', ms=size, color = 'grey', alpha = 0.2)
axs[q].plot(spatial_celltypes_tag_.obs.x, spatial_celltypes_tag_.obs.y, marker='s', linestyle='', ms=size, color = 'yellow')#spatial_int.uns['leiden_0.4_colors'][0])
axs[q].set_title(str(str(j)))
axs[q].axis('scaled')
axs[q].axis('off')
plt.show()
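# Example usage (a sketch; column names are hypothetical — `clusters_to_map`
# should be a finer clustering nested inside `broad_cluster`, and marker genes
# must already be ranked under key='t-test'):
#
#   plot_clusters(adata, clusters_to_map='leiden_0.5', broad_cluster='cell_type',
#                 sample_id_column='sample_id')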
def spatial_neighborhood(anndata,
cluster_label = 'leiden_0.5',
max_distance_allowed = 300,
umap_dist = 1,
leiden_resolution = 0.2
):
distances_input=np.array([anndata.obs['x'],anndata.obs['y']])
din=distances_input.transpose()
distances=euclidean_distances(din, din)
dist_df=pd.DataFrame(distances)
max_distance_allowed=max_distance_allowed
dist_binary=((dist_df<max_distance_allowed)*1)*((dist_df!=0)*1)
np.sum(np.sum(dist_binary))
dist_binary['name']=list(anndata.obs[cluster_label])
distbinsum=dist_binary.groupby('name').sum()
adata=sc.AnnData(distbinsum.transpose())
adata.obs=anndata.obs
sc.pp.neighbors(adata)  # neighbors graph is required before UMAP/Leiden
sc.tl.umap(adata,min_dist=umap_dist)
sc.tl.leiden(adata,resolution=leiden_resolution, key_added = 'local_neighborhood')
return adata
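# Example usage (a sketch): the threshold is in the same units as obs['x']/obs['y']
# (pixels here); 300 px at ~0.1625 um/px corresponds to roughly 50 um, an
# assumption to adjust per experiment:
#
#   niche = spatial_neighborhood(adata, cluster_label='cell_type',
#                                max_distance_allowed=300)
#   niche.obs['local_neighborhood'].value_counts()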
def create_ann_tiles(sample_path,
segmentation_folder = '/cell_segmentation/',
expand = True,
expand_distance = 30,
anndata_file_output = 'anndata.h5ad'):
path = sample_path+segmentation_folder
files = os.listdir(path)
#seg_files = [k for k in files if 'tile' in k]
#seg_files = [k for k in seg_files if '.npz' in k]
tiles = pd.read_csv(sample_path+ "/preprocessing/ReslicedTiles/tilepos.csv", header = None)
try:
spots = pd.read_csv(sample_path+'/decoded.csv').dropna()
except:
spots = pd.read_csv(sample_path+'/spots_PRMC.csv').dropna()
to_append = []
for i in sorted(spots.fov.unique()):
#try:
tile_int = tiles.iloc[i]
file_name = 'tile'+str(i+1)+'.npz'
image = load_npz(path+file_name).toarray()
if len(np.unique(image)) == 1:
continue
else:
spots_filt = spots[spots.fov == i]
if expand == True:
expanded = expand_labels(image, expand_distance)
cell_labels = label(expanded)
cells = get_object_info(cell_labels)
else:
cell_labels = label(image)
cells = get_object_info(cell_labels)
#print('#'+str(len(cells)))
cells['cellid'] = cells[0]+1
assigned = assign_spots_to_cells(cell_labels, spots_filt)
assigned = assigned[['target', 'x','y','cell']]
hm = assigned.groupby(['target','cell']).size().unstack(fill_value=0)
hm = hm.drop(columns = 0)
test = hm.T.reindex(hm.T.columns.union(spots.target.unique(), sort=False), axis=1, fill_value=0)
an_sp = sc.AnnData(X=test)
cells_filt = cells[cells['cellid'].astype(str).isin(list(an_sp.obs.index))]
cells_filt['x'] = cells_filt['x']+tile_int[0]
cells_filt['y'] = cells_filt['y']+tile_int[1]
an_sp.obs = cells_filt
#print(an_sp)
an_sp.obs = an_sp.obs.drop(columns = 0)
an_sp.obs['cellid_index'] = 'tile'+str(i+1)+'_'+an_sp.obs['cellid'].astype(str)
an_sp.obs = an_sp.obs.set_index('cellid_index')
to_append.append(an_sp)
#except:
# continue
ad_concat = sc.concat(to_append)
#sc.pp.filter_cells(ad_concat, min_counts=5)
spatial = np.array(ad_concat.obs[['x','y']].astype('<f8'))
ad_concat.obsm['spatial'] = spatial
ad_concat.obsm['xy_loc'] = spatial
ad_concat.write(sample_path+anndata_file_output)
return ad_concat
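# Example usage (a sketch; the sample folder is hypothetical and must contain
# preprocessing/ReslicedTiles/tilepos.csv, per-tile .npz masks under
# /cell_segmentation/ and a decoded.csv or spots_PRMC.csv spot table):
#
#   adata_sample = create_ann_tiles('/data/sample_01/', expand_distance=30)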
def concat_anndata(sample_anndata_list,
anndata_name = 'annData_obj_expanded.h5ad'
):
adsp_list = []
for sample in sample_anndata_list:
print(sample + anndata_name)
try:
adsp = sc.read(sample + anndata_name)
adsp.obs['sample_id'] = sample
adsp_list.append(adsp)
except:
print('sample: ' + sample + anndata_name +' not found')
adsp = sc.concat(adsp_list, index_unique=None, join='outer', fill_value=0)
return adsp
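# Example usage (a sketch; paths are hypothetical and each must already contain
# the per-sample .h5ad written by create_ann_tiles):
#
#   samples = ['/data/sample_01/', '/data/sample_02/']
#   adata_all = concat_anndata(samples, anndata_name='anndata.h5ad')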
def add_fov_number(spots_file,
tile_pos_file,
new_file,
tile_size = 2000,
conversion_factor=0.1625,
new_tile_column = 'fov_2000'):
spots = pd.read_csv(spots_file)
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 16 12:41:09 2019
@author: sdenaro
"""
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime as dt
from datetime import timedelta
import numpy as np
from numpy import matlib as matlib
import seaborn as sns
import statsmodels.api as sm
sns.set(style='whitegrid')
import matplotlib.cm as cm
#from sklearn.metrics import mean_squared_error, r2_score
from scipy import stats
def r2(x, y):
return stats.pearsonr(x, y)[0] ** 2
#Set Preference Customers reduction percent ('' or '_minus10' or '_minus20')
redux='_NEW'
##Load results
Results_d= pd.read_excel('BPA_net_rev_stoc_d' + redux + '.xlsx', sheet_name='Results_d')
#for e in range (1,60):
# Result_ensembles_d['ensemble' + str(e)]=pd.read_excel('BPA_net_rev_stoc_d' + redux + '.xlsx', sheet_name='ensemble' + str(e))
# print(str(e))
#
#for e in range (1,60):
# Result_ensembles_y['ensemble' + str(e)]=pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e))
# print(str(e))
#
#costs_y=pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx',sheet_name='Costs_y')
#PF_rates_avg=35.460833
#IP_rates_avg=44.030833
#Results Yearly Aggregates
Calendar_year=np.reshape(matlib.repmat(np.arange(1,1189),365,1), 1188*365, 'C' )
#PF_rev_y=Results_d.PF_rev.groupby(Calendar_year).sum()
#IP_rev_y=Results_d.IP_rev.groupby(Calendar_year).sum()
#SS_y=Results_d.SS.groupby(Calendar_year).sum()
#P_y=Results_d.P.groupby(Calendar_year).sum()
#BPA_hydro_y=Results_d.BPA_hydro.groupby(Calendar_year).sum()
PF_load_y=Results_d.PF_load.groupby(Calendar_year).sum()
IP_load_y=Results_d.IP_load.groupby(Calendar_year).sum()
MidC_y=Results_d.MidC.groupby(Calendar_year).mean()
CAISO_y=Results_d.CAISO.groupby(Calendar_year).mean()
Net_rev=pd.DataFrame(columns=['Net_Rev'])
for e in range (1,60):
Net_rev=Net_rev.append(pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[7]))
Net_rev.reset_index(inplace=True, drop=True)
Net_rev['positive']=Net_rev['Net_Rev']>0
Net_rev['negative']=Net_rev['Net_Rev']<0
####Weather data
#df_weather=pd.read_csv('../../CAPOW/CAPOW_SD/Stochastic_engine/Synthetic_weather/INDEX_synthetic_temp_wind.csv')
#df_weather.index=pd.DatetimeIndex(data=(t for t in dates if not isleap(t.year)))
#df_weather=df_weather[BPA_hydro.index[0]:BPA_hydro.index[-1]]
#
#Temp_Wind_y=df_weather.resample('D').sum()
#Temp_Wind_y=Temp_Wind_y.drop(index=pd.DatetimeIndex(data=(t for t in dates if isleap(t.year))))
#Temp_Wind_y=Temp_Wind_y.groupby(Calendar_year).max()
############ PLOTS ################################################
#Net revenue Bar plot
plt.rcParams.update({'font.size': 18})
plt.figure()
ax1 = plt.subplot()
ax1 = Net_rev['Net_Rev'].plot(kind="bar",
linewidth=0,
ax=ax1, color=Net_rev.positive.map({True:'blue', False:'red'})) # make bar plots
ax1.set_xticklabels(Net_rev.index, rotation = 0)
ax1.set_title('Yearly Net Revenue')
ax1.xaxis.set_ticks(np.arange(1, 1188, 20))
#ax1.set_xticklabels([i for i in range(1,1200,59)])
ax1.set_xticklabels([],[])
ax1.set_yticklabels([i for i in np.arange(-1,2,0.5)])
ax1.set_ylabel('B$')
ax1.grid(linestyle='-', linewidth=0.2)
#axbis = ax1.twinx()
#axbis.plot(TDA_y, 'steelblue')
#axbis.set_yticks([], [])
#plt.xlim(-1.7 ,20)
plt.tight_layout()
plt.savefig('figures/NetRev1200' + redux)
## Draw the density plot
plt.figure()
ax_pdf=sns.kdeplot(pow(10,-6)*Net_rev['Net_Rev'], shade=True)
# Plot formatting
ax_pdf.legend().set_visible(False)
plt.title('Yearly Net Revenue')
plt.xlabel('$MIllion per year')
ax_pdf.set_ylabel('density')
line = ax_pdf.get_lines()[-1]
x, y = line.get_data()
mask = x < 0
x, y = x[mask], y[mask]
ax_pdf.fill_between(x, y1=y, alpha=0.5, facecolor='red')
ax_pdf.ticklabel_format(style='sci', axis='y', scilimits=(-3,0))
#plt.text(0.5,1.5, 'mean=$M'+str(round(pow(10,-6)*Net_rev['Net_Rev'].mean()))\
# +'\n'+'std=$M'+str(pow(10,-6)*round(Net_rev['Net_Rev'].std())))
ax_pdf.set_xlim(-850,700)
#ax_pdf.set_ylim(0,3)
plt.show()
plt.savefig('figures/Rev_PDF' + redux, format='eps')
#Calculate VaR
#sort the net revs
Net_rev_sorted=Net_rev['Net_Rev'].sort_values(ascending=True)
Net_rev_sorted.reset_index(drop=True, inplace=True)
VaR_90 = Net_rev_sorted.quantile(0.1)
VaR_95 = Net_rev_sorted.quantile(0.05)
VaR_99 = Net_rev_sorted.quantile(0.01)
from tabulate import tabulate
print (tabulate([['90%', VaR_90],['95%', VaR_95], ['99%', VaR_99]], headers=['Confidence Level', 'Value at Risk']))
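# Equivalent check (same quantiles expressed with numpy): VaR_95 above is simply
# the 5th percentile of the yearly net revenues, e.g.
#   np.percentile(Net_rev['Net_Rev'], 5)   # should match VaR_95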
plt.axvline(x=VaR_90*pow(10,-6),color= 'yellow')
plt.text(VaR_90*pow(10,-6),1.5*pow(10,-3) , "VaR 90 %d" % VaR_90, rotation=90, verticalalignment='center')
plt.axvline(x=VaR_95*pow(10,-6),color= 'orange')
plt.text(VaR_95*pow(10,-6),1.5*pow(10,-3) , "VaR 95 %d" % VaR_95, rotation=90, verticalalignment='center')
plt.axvline(x=VaR_99*pow(10,-6),color= 'red')
plt.text(VaR_99*pow(10,-6),1.5*pow(10,-3) , "VaR 99 %d" % VaR_99, rotation=90, verticalalignment='center')
idx=np.where(np.diff(np.sign(Net_rev_sorted)))[0]
Negative_percent = 100*((idx+1)/len(Net_rev_sorted))
print ('Percent of negative net revs: %.2f' % Negative_percent )
plt.text(-700,1.5*pow(10,-3) , "perc negatives %f" % Negative_percent, rotation=90, verticalalignment='center')
Net_rev_avg=Net_rev['Net_Rev'].mean()
print('Average Net Revenue: %.2f' % Net_rev_avg)
plt.axvline(x=Net_rev_avg*pow(10,-6))
plt.text(Net_rev_avg*pow(10,-6),1.5*pow(10,-3) , "Average %d" % Net_rev_avg, rotation=90, verticalalignment='center')
plt.savefig('figures/Rev_PDF_lines' + redux + '.eps', format='eps')
plt.savefig('figures/Rev_PDF_lines' + redux, format='png')
#####################################################################
#### ENSEMBLE ANALYSIS ##############
#Create single ensemble horizonatal panels plot
plt.rcParams.update({'font.size': 12})
for e in range (1,60):
Net_rev_e=pow(10,-9)*pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[7])['Net_Rev']
Positive=Net_rev_e>0
fig, axes = plt.subplots(nrows=4, ncols=1)
ax1=axes[0]
Net_rev_e.plot(kind="bar",
linewidth=0.2,
ax=ax1,
color=Positive.map({True:'blue', False:'red'})) # make bar plots
ax1.set_title('Net Revenue Ensemble '+str(e), pad=0.6)
ax1.xaxis.set_ticks(range(1, 21, 1))
ax1.set_xticklabels([],[])
#ax1.set_xticklabels([i for i in np.arange(1,21,1)])
ax1.set_ylabel('B$')
ax1.set_xlim(-0.5,19.5)
ax1.grid(linestyle='-', linewidth=0.2, axis='x')
ax1.get_yaxis().set_label_coords(-0.08,0.5)
Reserves_e=pow(10,-9)*pd.read_excel('BPA_net_rev_stoc_y' + redux + '.xlsx', sheet_name='ensemble' + str(e), usecols=[1])
Reserves_e=Reserves_e.append(pd.Series(Reserves_e.iloc[19]))
import numpy as np
import pandas as pd
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_non_participants,
)
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_one_strict_assort_by_group,
)
from src.create_initial_states.make_educ_group_columns import (
_create_group_id_for_participants,
)
from src.create_initial_states.make_educ_group_columns import _determine_group_sizes
from src.create_initial_states.make_educ_group_columns import _get_id_to_weak_group
from src.create_initial_states.make_educ_group_columns import (
_get_key_with_longest_value,
)
from src.create_initial_states.make_educ_group_columns import (
_get_key_with_shortest_value,
)
from src.create_initial_states.make_educ_group_columns import _split_data_by_query
def test_get_id_to_weak_group():
raw_id = pd.Series([2, 2, 3, 3, 4, 4, 5, 5]) # dtype int is right.
participants = pd.DataFrame(index=[2, 3, 4, 5])
participants["__weak_group_id"] = [0, 1] + [1, 0]
expected = pd.Series([0, 1], index=[3, 4])
res = _get_id_to_weak_group(participants, raw_id)
pd.testing.assert_series_equal(res, expected, check_names=False)
def test_split_data_by_query():
df = pd.DataFrame(index=list("abcde"))
df["to_select"] = [True, True, False, True, False]
query = "to_select"
res_selected, res_others = _split_data_by_query(df, query)
expected_selected = df.loc[["a", "b", "d"]]
expected_other = df.loc[["c", "e"]]
pd.testing.assert_frame_equal(res_selected, expected_selected)
pd.testing.assert_frame_equal(res_others, expected_other)
def test_create_group_id_for_participants():
df = pd.DataFrame()
df["state"] = ["BY"] * 4 + ["NRW"] * 8
df["county"] = ["N", "N", "M", "M"] + ["K"] * 5 + ["D"] * 3
group_size = 2
strict_assort_by = "state"
weak_assort_by = "county"
res = _create_group_id_for_participants(
df=df,
group_size=group_size,
strict_assort_by=strict_assort_by,
weak_assort_by=weak_assort_by,
)
expected = pd.Series(
[2, 2, 1, 1, 4, 4, 6, 6, 7, 5, 5, 7], dtype=float, name="group_id"
)
pd.testing.assert_series_equal(res, expected)
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import os
import operator
import unittest
import numpy as np
from pandas.core.api import (Index, Series, TimeSeries, DataFrame, isnull)
import pandas.core.datetools as datetools
from pandas.util.testing import assert_series_equal
import pandas.util.testing as common
#-------------------------------------------------------------------------------
# Series test cases
class TestSeries(unittest.TestCase):
def setUp(self):
self.ts = common.makeTimeSeries()
self.series = common.makeStringSeries()
self.objSeries = common.makeObjectSeries()
self.empty = Series([], index=[])
def test_constructor(self):
# Recognize TimeSeries
self.assert_(isinstance(self.ts, TimeSeries))
# Pass in Series
derived = Series(self.ts)
self.assert_(isinstance(derived, TimeSeries))
self.assert_(common.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEquals(id(self.ts.index), id(derived.index))
# Pass in scalar
scalar = Series(0.5)
self.assert_(isinstance(scalar, float))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assert_(mixed.dtype == np.object_)
self.assert_(mixed[1] is np.NaN)
self.assertRaises(Exception, Series, [0, 1, 2], index=None)
self.assert_(not isinstance(self.empty, TimeSeries))
self.assert_(not isinstance(Series({}), TimeSeries))
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
def test_constructor_corner(self):
df = common.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
self.assert_(isinstance(s, Series))
def test_fromDict(self):
data = {'a' : 0, 'b' : 1, 'c' : 2, 'd' : 3}
series = Series(data)
self.assert_(common.is_sorted(series.index))
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from src.d04_modeling.abstract_glm import AbstractGLM
from tqdm import tqdm
from scipy.stats import poisson
class PoissonGLM(AbstractGLM):
def __init__(self, x_train, y_train, sigma2, weights=None, bias=True):
"""
:param x_train: covariate data
:param y_train: outcome data
:param sigma2: hyperparameter for regression coefficients prior
:param weights: sample weights
:param bias: add bias term to GLM
"""
super(PoissonGLM, self).__init__(x_train=x_train, y_train=y_train, bias=bias)
self._cov_map = None
self._sigma2 = sigma2
self._num_rows = len(x_train)
if weights is None:
weights = np.ones(y_train.shape).reshape((-1, 1))
self._weights = weights
def get_cov_map(self):
return self._cov_map
def log_joint(self, y, X, weights, w=None, sigma2=None):
if w is None:
w = self._w_map
if sigma2 is None:
sigma2 = self._sigma2
ljp = -np.dot(w, w)/(2 * sigma2)
ljp += np.dot(np.dot(X, w).reshape((1, -1)), y * weights)[0, 0]
ljp -= np.sum(np.exp(np.dot(X, w)).reshape((-1, 1)) * weights)
return ljp
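# The two static methods below are meant to be the gradient and Hessian of the
# same weighted Poisson-GLM log joint used in log_joint(), i.e. (up to a constant)
#   log p(w, y | X) = -||w||^2 / (2*sigma2)
#                     + sum_i weights_i * (y_i * x_i^T w - exp(x_i^T w)).
# Keeping them consistent is what makes the BFGS optimisation and the Laplace
# approximation below valid.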
@staticmethod
def log_joint_grad(y, X, weights, w, sigma2):
return -w/sigma2 + np.dot(X.T, y * weights).flatten() \
- np.dot(X.T, np.exp(np.dot(X, w)).reshape((-1, 1)) * weights).flatten()
@staticmethod
def log_joint_hess(y, X, weights, w, sigma2):
return -np.eye(len(w)) / sigma2 \
- np.dot(X.T, np.multiply(X, (np.exp(np.dot(X, w).reshape((-1, 1))) * weights).flatten()[:, np.newaxis]))
def compute_posterior_mode(self):
# Minimize the log joint. Normalize by N so it's better scaled.
x_train = self.get_x_train()
y_train = self.get_y_train()
obj = lambda w: -self.log_joint(y_train, x_train, self._weights, w, self._sigma2) / self._num_rows
obj_grad = lambda w: -self.log_joint_grad(y_train, x_train, self._weights, w, self._sigma2) / self._num_rows
# Keep track of the weights visited during optimization.
w_init = np.zeros(x_train.shape[1])
w_hist = [w_init]
def callback(w):
w_hist.append(w)
result = minimize(obj, w_init, jac=obj_grad, callback=callback, method="BFGS")
w_hist = np.array(w_hist)
self._w_map = w_hist[-1]
return result, w_hist
def sample_posterior_w(self, num_samples):
w_s = np.random.multivariate_normal(mean=self._w_map, cov=self._cov_map, size=num_samples)
return w_s
def compute_laplace_approximation(self):
y_train = self.get_y_train()
x_train = self.get_x_train()
if self._w_map is None:
self.compute_posterior_mode()
self._cov_map = -np.linalg.inv(self.log_joint_hess(y_train, x_train, self._weights, self._w_map, self._sigma2))
return None
def get_posterior_predictive_distribution(self, x_validate, y_validate, ncols, num_samples):
print("Sampling posterior predictive distribution...")
max_count = y_validate[:ncols].max()
num_validate = len(x_validate)
posterior_predictive_distribution = np.zeros([max_count + 1, num_validate])
w_s = self.sample_posterior_w(num_samples)
lambda_n = np.exp(np.dot(x_validate, w_s.T)) # num_validate x num_samples matrix
for k in tqdm(range(max_count + 1)):
posterior_predictive_distribution[k, :] = poisson.pmf(k, mu=lambda_n).mean(axis=1).T
return posterior_predictive_distribution
def rate_map(self, w, X):
return np.exp(np.dot(X, w).reshape((-1, 1)))
def obs_map(self, w, X):
return np.floor(self.rate_map(w, X))
if __name__ == "__main__":
import os
import seaborn as sns
import pandas as pd
from src.d01_data.dengue_data_api import DengueDataApi
os.chdir('../')
dda = DengueDataApi()
x_train, x_validate, y_train, y_validate = dda.split_data()
sigma2 = 1.
poisson_glm = PoissonGLM(x_train=x_train, y_train=y_train, sigma2=sigma2)
_, w_hist = poisson_glm.compute_posterior_mode()
w_hist_df = pd.DataFrame(w_hist, columns=x_train.columns)
weights = np.ones(len(x_train)).reshape((-1, 1))
w_hist_df['log_joint'] = w_hist_df.apply(lambda w: poisson_glm.log_joint(y_train.values.reshape((-1,1)),
x_train.values, weights,
w.values, sigma2), axis=1)
w_hist_df.name = 'iter'
axs1 = sns.lineplot(data=w_hist_df.iloc[1:].reset_index(), x="index", y="log_joint")
poisson_glm.compute_laplace_approximation()
cov_map = poisson_glm.get_cov_map()
cov_map_df = pd.DataFrame(cov_map, index=x_train.columns, columns=x_train.columns)
"""Helpers for building Dash applications."""
import csv
import json
import sqlite3
import time
from contextlib import ContextDecorator
from datetime import datetime
from pathlib import Path
import pandas as pd
from cerberus import Validator
# ----------------------------------------------------------------------------------------------------------------------
# For Working with Data
def enable_verbose_pandas(max_columns=None, max_rows=None, max_seq_items=None):
"""Update global pandas configuration for printed dataframes.
Args:
max_columns: the number of max columns. Default is None (to show all)
max_rows: the number of max rows. Default is None (to show all)
max_seq_items: maximum number of items printed from a long sequence (e.g. an index or list-like cell) before truncation. Default is None (to show all)
"""
# Enable all columns to be displayed at once (or tweak to set a new limit)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
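# Example usage (a sketch; call once at app start-up, before printing dataframes):
#
#   enable_verbose_pandas(max_rows=200)
#   print(df)  # `df` is a hypothetical dataframe; all columns now display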
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),
Timestamp('2013-03-31', tz='Asia/Tokyo'),
Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), Timestamp('2011-01-03', tz=tz))
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
self.assertEqual(np.min(dr),
Timestamp('2016-01-15 00:00:00', freq='D'))
self.assertEqual(np.max(dr),
Timestamp('2016-01-20 00:00:00', freq='D'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, dr, out=0)
self.assertEqual(np.argmin(dr), 0)
self.assertEqual(np.argmax(dr), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, dr, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, dr, out=0)
def test_round(self):
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=5,
freq='30Min', tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 01:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 02:00:00', tz=tz, freq='30T'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with tm.assertRaisesRegexp(ValueError, msg):
rng.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, rng.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_repeat_range(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
for tz in self.tz:
index = pd.date_range('2001-01-01', periods=2, freq='D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-02', '2001-01-02'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.date_range('2001-01-01', periods=2, freq='2D', tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01',
'2001-01-03', '2001-01-03'], tz=tz)
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = pd.DatetimeIndex(['2001-01-01', 'NaT', '2003-01-01'],
tz=tz)
exp = pd.DatetimeIndex(['2001-01-01', '2001-01-01', '2001-01-01',
'NaT', 'NaT', 'NaT',
'2003-01-01', '2003-01-01', '2003-01-01'],
tz=tz)
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_repeat(self):
reps = 2
msg = "the 'axis' parameter is not supported"
for tz in self.tz:
rng = pd.date_range(start='2016-01-01', periods=2,
freq='30Min', tz=tz)
expected_rng = DatetimeIndex([
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:00:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
Timestamp('2016-01-01 00:30:00', tz=tz, freq='30T'),
])
res = rng.repeat(reps)
tm.assert_index_equal(res, expected_rng)
self.assertIsNone(res.freq)
tm.assert_index_equal(np.repeat(rng, reps), expected_rng)
tm.assertRaisesRegexp(ValueError, msg, np.repeat,
rng, reps, axis=1)
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idx.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T',
'S', 'L', 'U'],
['day', 'day', 'day', 'day', 'hour',
'minute', 'second', 'millisecond',
'microsecond']):
for tz in self.tz:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq,
tz=tz)
self.assertEqual(idx.resolution, expected)
def test_union(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_add_iadd(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add a datelike to a DatetimeIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx + Timestamp('2011-01-01')
with tm.assertRaisesRegexp(TypeError, msg):
Timestamp('2011-01-01') + idx
def test_add_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now raises
# TypeError (GH14164)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
with tm.assertRaises(TypeError):
dti + dti
with tm.assertRaises(TypeError):
dti_tz + dti_tz
with tm.assertRaises(TypeError):
dti_tz + dti
with tm.assertRaises(TypeError):
dti + dti_tz
def test_difference(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3)]:
result_diff = rng.difference(other)
tm.assert_index_equal(result_diff, expected)
def test_sub_isub(self):
for tz in self.tz:
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10,
tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with tm.assertRaises(TypeError):
dti_tz - dti
with tm.assertRaises(TypeError):
dti - dti_tz
with tm.assertRaises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with tm.assertRaises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_comp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in self.tz:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)),
tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10,
tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10,
tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 09:00', '2013-01-01 08:00',
'2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'],
tz=tz)
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00',
pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False),
expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex,
([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H',
tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer,
np.array([2, 1, 0]),
check_dtype=False)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
for tz in self.tz:
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz=tz, name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz=tz, name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], tz=tz, name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], tz=tz, name='idx3')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D',
tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03',
'2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D',
'-3D', 'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S',
'-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.date_range('2011-01-01', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.DatetimeIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
for tz in self.tz:
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
'2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
'2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
'2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_nat(self):
self.assertIs(pd.DatetimeIndex._na_value, pd.NaT)
self.assertIs(pd.DatetimeIndex([])._na_value, pd.NaT)
for tz in [None, 'US/Eastern', 'UTC']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.DatetimeIndex(['2011-01-01', 'NaT'], tz=tz)
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
for tz in [None, 'UTC', 'US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.DatetimeIndex(['2011-01-01', '2011-01-02', 'NaT'],
tz='US/Pacific')
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
# same internal, different tz
idx3 = pd.DatetimeIndex._simple_new(idx.asi8, tz='US/Pacific')
tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)
self.assertFalse(idx.equals(idx3))
self.assertFalse(idx.equals(idx3.copy()))
self.assertFalse(idx.equals(idx3.asobject))
self.assertFalse(idx.asobject.equals(idx3))
self.assertFalse(idx.equals(list(idx3)))
self.assertFalse(idx.equals(pd.Series(idx3)))
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = []
def test_ops_properties(self):
self.check_ops_properties(['days', 'hours', 'minutes', 'seconds',
'milliseconds'])
self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'),
Timedelta('3 days'), Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), Timedelta('1 days')),
self.assertEqual(idx.max(), Timedelta('3 days')),
self.assertEqual(idx.argmin(), 0)
self.assertEqual(idx.argmax(), 2)
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
dr = pd.date_range(start='2016-01-15', end='2016-01-20')
td = TimedeltaIndex(np.asarray(dr))
self.assertEqual(np.min(td), Timedelta('16815 days'))
self.assertEqual(np.max(td), Timedelta('16820 days'))
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
self.assertEqual(np.argmin(td), 0)
self.assertEqual(np.argmax(td), 5)
if not _np_version_under1p10:
errmsg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00'),
])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
self.assertEqual(elt.round(freq='H'), expected_elt)
msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
with self.assertRaisesRegexp(ValueError, msg):
td.round(freq='foo')
with tm.assertRaisesRegexp(ValueError, msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
"freq='D')")
exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
"dtype='timedelta64[ns]', freq='D')")
exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
"'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
"00:00:00")
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days', '10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
idx = TimedeltaIndex(['1 day', '2 day'])
msg = "cannot subtract a datelike from a TimedeltaIndex"
with tm.assertRaisesRegexp(TypeError, msg):
idx - Timestamp('2011-01-01')
result = Timestamp('2011-01-01') + idx
expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
tm.assert_index_equal(result, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
rng = timedelta_range('1 days', '10 days', name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda: rng * offset)
# divide
expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected, exact=False)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result, expected)
# don't allow division by NaT (may be allowed in the future)
self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda: tdi - dt)
self.assertRaises(TypeError, lambda: tdi - dti)
self.assertRaises(TypeError, lambda: td - dt)
self.assertRaises(TypeError, lambda: td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result, expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda: dt_tz - ts)
self.assertRaises(TypeError, lambda: dt_tz - dt)
self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda: dt - dt_tz)
self.assertRaises(TypeError, lambda: ts - dt_tz)
self.assertRaises(TypeError, lambda: ts_tz2 - ts)
self.assertRaises(TypeError, lambda: ts_tz2 - dt)
self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda: dti - ts_tz)
self.assertRaises(TypeError, lambda: dti_tz - ts)
self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'H']:
idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
with tm.assertRaises(TypeError):
idx - p
with tm.assertRaises(TypeError):
p - idx
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda: tdi + dti[0:1])
self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
# this is a union!
# self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_comp_nat(self):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
for l, r in [(left, right), (left.asobject, right.asobject)]:
result = l == r
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = l != r
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == r, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(l != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != l, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(l < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
'1 days 09:00:00', '1 days 08:00:00',
'1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
for obj in [idx, Series(idx)]:
tm.assert_series_equal(obj.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
# GH 9680
tdi = pd.timedelta_range(start=0, periods=10, freq='1s')
ts = pd.Series(np.random.normal(size=10), index=tdi)
self.assertNotIn('foo', ts.__dict__.keys())
self.assertRaises(AttributeError, lambda: ts.foo)
def test_order(self):
# GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
name='idx')
idx2 = TimedeltaIndex(
['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer,
np.array([0, 1, 2]),
check_dtype=False)
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
# TODO(wesm): unused?
# exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
# '3 day', '5 day'], name='idx2')
# idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
# '2 minute', pd.NaT], name='idx3')
# exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
# '5 minute'], name='idx3')
for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
exp = np.array([0, 4, 3, 1, 2])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True,
ascending=False)
self.assert_index_equal(ordered, expected[::-1])
exp = np.array([2, 1, 3, 4, 0])
self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day',
'2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
# GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_drop_duplicates(self):
# to check Index/Series compat
base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
idx = base.append(base[:5])
res = idx.drop_duplicates()
tm.assert_index_equal(res, base)
res = Series(idx).drop_duplicates()
tm.assert_series_equal(res, Series(base))
res = idx.drop_duplicates(keep='last')
exp = base[5:].append(base[:5])
tm.assert_index_equal(res, exp)
res = Series(idx).drop_duplicates(keep='last')
tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
res = idx.drop_duplicates(keep=False)
tm.assert_index_equal(res, base[5:])
res = Series(idx).drop_duplicates(keep=False)
tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
# GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S'
]:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
def test_nat_new(self):
idx = pd.timedelta_range('1', freq='D', periods=5, name='x')
result = idx._nat_new()
exp = pd.TimedeltaIndex([pd.NaT] * 5, name='x')
tm.assert_index_equal(result, exp)
result = idx._nat_new(box=False)
exp = np.array([tslib.iNaT] * 5, dtype=np.int64)
tm.assert_numpy_array_equal(result, exp)
def test_shift(self):
# GH 9903
idx = pd.TimedeltaIndex([], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
exp = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
exp = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
tm.assert_index_equal(idx.shift(0, freq='T'), idx)
exp = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
name='xxx')
tm.assert_index_equal(idx.shift(3, freq='T'), exp)
exp = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
name='xxx')
tm.assert_index_equal(idx.shift(-3, freq='T'), exp)
def test_repeat(self):
index = pd.timedelta_range('1 days', periods=2, freq='D')
exp = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
for res in [index.repeat(2), np.repeat(index, 2)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
index = TimedeltaIndex(['1 days', 'NaT', '3 days'])
exp = TimedeltaIndex(['1 days', '1 days', '1 days',
'NaT', 'NaT', 'NaT',
'3 days', '3 days', '3 days'])
for res in [index.repeat(3), np.repeat(index, 3)]:
tm.assert_index_equal(res, exp)
self.assertIsNone(res.freq)
def test_nat(self):
self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)
idx = pd.TimedeltaIndex(['1 days', '2 days'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))
self.assertFalse(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([], dtype=np.intp))
idx = pd.TimedeltaIndex(['1 days', 'NaT'])
self.assertTrue(idx._can_hold_na)
tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))
self.assertTrue(idx.hasnans)
tm.assert_numpy_array_equal(idx._nan_idxs,
np.array([1], dtype=np.intp))
def test_equals(self):
# GH 13107
idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])
self.assertTrue(idx.equals(idx))
self.assertTrue(idx.equals(idx.copy()))
self.assertTrue(idx.equals(idx.asobject))
self.assertTrue(idx.asobject.equals(idx))
self.assertTrue(idx.asobject.equals(idx.asobject))
self.assertFalse(idx.equals(list(idx)))
self.assertFalse(idx.equals(pd.Series(idx)))
idx2 = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
self.assertFalse(idx.equals(idx2))
self.assertFalse(idx.equals(idx2.copy()))
self.assertFalse(idx.equals(idx2.asobject))
self.assertFalse(idx.asobject.equals(idx2))
self.assertFalse(idx.asobject.equals(idx2.asobject))
self.assertFalse(idx.equals(list(idx2)))
self.assertFalse(idx.equals(pd.Series(idx2)))
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['qyear'],
lambda x: isinstance(x, PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'),
pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'),
pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT',
'2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'),
pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'),
pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
tm.assert_index_equal(result, expected)
for i in [0, 1, 3]:
self.assertEqual(result[i], expected[i])
self.assertIs(result[2], pd.NaT)
self.assertEqual(result.name, expected.name)
result_list = idx.tolist()
for i in [0, 1, 3]:
self.assertEqual(result_list[i], expected_list[i])
self.assertIs(result_list[2], pd.NaT)
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), | pd.Period('2011-01-03', freq='D') | pandas.Period |
'''💹💰💷💶💴💵💸🤖👩💻🧑💻👨💻📉📈📊📰'''
import os
import time
import pytz
import json
import logging
import requests
import datetime
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import streamlit as st
from streamlit import caching
import matplotlib.pyplot as plt
from configparser import ConfigParser
from requests.exceptions import ConnectionError
from src.db import DataBase
from src.utils_stocks import get_curr_price
from src.utils_general import get_yahoo_link
from src.utils_general import get_google_link
from src.utils_general import suppress_stdout
logging.getLogger().setLevel(logging.CRITICAL)
# directories
DIR_DB = os.path.join(os.getcwd(), 'data', 'db')
DIR_DEMO = os.path.join(os.getcwd(), 'data', 'demo')
F_CFG = os.path.join(os.getcwd(), 'config.ini')
# constants and objects
cfg = ConfigParser()
cfg.read(F_CFG)
CFG_SECTION = 'deploy_webapp'
IS_DEMO = cfg.getint(CFG_SECTION, 'IS_DEMO')
F_DEMO_DF_C = os.path.join(DIR_DEMO, 'df_c.parquet')
F_DEMO_DF_PROBA_SM = os.path.join(DIR_DEMO, 'df_proba_sm.parquet')
DATI_OLD = '19930417_0000'
if IS_DEMO:
db = DataBase([], dir_db=DIR_DEMO)
else:
db = DataBase([], dir_db=DIR_DB)
# system strings
ERROR_EXCEPTION = '{} - {}'
ERROR_CONNECTION = 'Connection error! Try again in a few seconds.'
TEXT_PAGE_TITLE = 'Five Minute Midas'
TEXT_TITLE = '''# Five Minute Midas 📈
### Predicting profitable day trading positions for *{}*.
---'''
TEXT_ADVICE = '\n ### Try changing the **Profit Probability.**'
TEXT_SYMBOLS_FOUND = '### {} of {} symbols selected.{}\n---'
TEXT_FIG = '''## {} - {} {}
#### {} - {}
{}
'''
TEXT_FIG_MULTI = '## All Symbols Summary'
TEXT_LINKS = '''[G-News]({}), [Y-Finance]({})'''
TEXT_BUTTON1 = 'Refresh Cache'
TEXT_BUTTON3 = 'or Show All'
TEXT_EXPLAIN = 'Explain'
TEXT_STR_EXPLAIN_1 = 'Latest price: ${}, {} from day before'
TEXT_STR_EXPLAIN_2 = '- At {}, there was {}% chance of profit. Actual profit: {}%'
TEXT_STR_EXPLAIN_3 = '''Price Chart
- Red Line - Volume Weighted Average Price (VWAP)
- Red Point - Bullish RSI Div, current profit *negative*
- Green Point - Bullish RSI Div, current profit *positive*'''
TEXT_STR_EXPLAIN_4 = '''RSI Chart (14 Periods)
- Orange Line - *Overbought* Indicator
- Green Line - *Oversold* Indicator'''
TEXT_DESCRIPTION = 'Company Description'
TEXT_SELECTBOX = '' #'Symbol - Industry - Profit Probability (Latest)'
TEXT_SELECT_DEFAULT = 'Choose a Symbol...'
TEXT_SLIDER1 = 'Profit Probability (Latest)'
TEXT_SLIDER2 = 'Historical Prediction Range'
TEXT_SIDEBAR_HEADER = '### Advanced Settings'
TEXT_SIDEBAR_INPUT1 = 'Add Symbols (e.g. BYND IBM)'
TEXT_SIDEBAR_INPUT2 = 'Remove Symbols (e.g. SPOT BA)'
TEXT_SIDEBAR_INPUT3 = 'Current Positions (e.g. TSLA 630)'
TEXT_SIDEBAR_INPUT4 = 'Simulate Time Cutoff (e.g. 0945)'
TEXT_SIDEBAR_RADIO = 'Sort By'
TEXT_SIDEBAR_BUTTON = 'Show Current Profits'
TEXT_SIDEBAR_WARN_DEMO = 'Feature disabled for demo.'
TEXT_SIDEBAR_ERROR = 'Empty or invalid input.'
TEXT_SIDEBAR_INFO = '''### Information
- 💻 See code: [GitHub](https://github.com/MichaelOw/five-minute-midas)
- 🤖 Developer: [Michael](https://www.linkedin.com/in/michael-ow/)
- 📰 Read article: [Medium](https://michael-ow.medium.com/how-i-used-a-random-forest-classifier-to-day-trade-for-2-months-part-i-9c00d96d254c)
'''
dt_sort_params = {
'Profit Probability (Latest)':'proba_last',
'Profit Probability (Max)':'proba_max',
'Prediction Time (Latest)':'datetime_last',
'Symbol':'sym',
}
@st.cache()
def get_predictions_summary():
'''Makes API call to get and return dataframe containing predictions summary
Returns:
df (pandas.Dataframe)
'''
if IS_DEMO:
df_proba_sm_demo = pd.read_parquet(F_DEMO_DF_PROBA_SM)
return df_proba_sm_demo
# api call to get df_proba_sm
url = 'http://localhost:5000/df_proba_sm'
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
r = requests.post(url, data='', headers=headers)
data = json.loads(r.text)
df = pd.DataFrame(**data)
for col in ['datetime_last', 'datetime_update']:
df[col] = | pd.to_datetime(df[col]) | pandas.to_datetime |