#!/usr/bin/env python
"""
Evaluate the performance of the generative model on multiple aspects:
to be filled
"""
import pandas as pd
import numpy as np
from post_processing import data
from rdkit import Chem, DataStructs
import scipy.stats as ss
import math
from rdkit.Chem.Draw import IPythonConsole
from IPython.display import SVG
import time
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
def internal_sim(smi_lst):
"""
Compute internal similarity within generated SMILES
Args:
        smi_lst: list of generated unique SMILES strings
    Returns: Average internal molecular similarity within the input list
"""
setV = len(smi_lst)
mols = data.get_mols(smi_lst)
fps_morgan, _ = data.get_fingerprints(mols)
total_smi = 0
for i in range(len(fps_morgan)):
for j in range(len(fps_morgan)):
total_smi += DataStructs.DiceSimilarity(fps_morgan[i], fps_morgan[j])
Din = total_smi/(setV*setV)
return Din
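# A minimal, self-contained sketch of the pairwise-Dice idea behind internal_sim,
# bypassing the post_processing helpers (whose interface is assumed above): Morgan
# fingerprints are built directly with RDKit and DiceSimilarity is averaged over
# all ordered pairs, self-pairs included, exactly as in the double loop above.
# The SMILES below are arbitrary illustrative molecules.
def _internal_sim_sketch(smiles=("CCO", "c1ccccc1", "CC(=O)Oc1ccccc1C(=O)O")):
    from rdkit.Chem import AllChem
    mols = [Chem.MolFromSmiles(s) for s in smiles]
    fps = [AllChem.GetMorganFingerprint(m, 2) for m in mols]
    n = len(fps)
    total = sum(DataStructs.DiceSimilarity(fps[i], fps[j])
                for i in range(n) for j in range(n))
    return total / (n * n)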
def external_sim(smi_lst, reference):
"""
Compute the external similarity against the source data, i.e. the average similarity between the
generated molecules and their nearest neighbours in the training set.
Args:
        smi_lst: list of generated unique SMILES strings
        reference: list of SMILES used for training the generation
    Returns: Average external molecular similarity between the generated and reference lists
"""
    gen_mols, ori_mols = data.get_mols(smi_lst), data.get_mols(reference)
    fps_gen, _ = data.get_fingerprints(gen_mols)
    #print(len(smi_lst), len(fps_gen))
    fps_ori, _ = data.get_fingerprints(ori_mols)
similarity_maxs = []
neighbours = []
for i in range(len(fps_gen)):
similarity_with = []
gen_smi = smi_lst[i]
for j in range(len(fps_ori)):
similarity_with.append(DataStructs.DiceSimilarity(fps_gen[i], fps_ori[j]))
similarity_maxs.append(max(similarity_with))
k = np.argmax(similarity_with)
ref_neighbour = similarity_with[k]
neighbours.extend([reference[k], ref_neighbour])
assert (len(similarity_maxs) == len(fps_gen))
Dext = np.sum(similarity_maxs)/len(fps_gen)
return Dext, neighbours
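# Hedged sketch of the nearest-neighbour step inside external_sim: for a single
# generated molecule, its external similarity is the maximum Dice similarity over
# the reference fingerprints, and argmax picks the nearest training molecule.
# Fingerprints are again built directly with RDKit; all SMILES are illustrative.
def _nearest_neighbour_sketch(gen_smi="CCO", reference=("CCN", "CCCO", "c1ccccc1")):
    from rdkit.Chem import AllChem
    fp_gen = AllChem.GetMorganFingerprint(Chem.MolFromSmiles(gen_smi), 2)
    fps_ref = [AllChem.GetMorganFingerprint(Chem.MolFromSmiles(s), 2)
               for s in reference]
    sims = [DataStructs.DiceSimilarity(fp_gen, fp) for fp in fps_ref]
    k = int(np.argmax(sims))
    return reference[k], sims[k]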
def KL_divergence(gen_arr, reference_arr):
"""
Args:
gen_arr: array of numeric parameters of generated molecules
reference_arr: array of original numeric parameters of training molecules
    Returns: KL-divergence of gen_arr against reference_arr
"""
epsilon = 0.0001
min_val = math.floor(min(min(gen_arr), min(reference_arr)))
max_val = math.ceil(max(max(gen_arr), max(reference_arr)))
gen_arr_dis = np.histogram(gen_arr, bins=12,range=(min_val, max_val), density=True)[0] + epsilon
reference_arr_dis = np.histogram(reference_arr, bins=12, range=(min_val, max_val), density=True)[0] + epsilon
entropy = ss.entropy(reference_arr_dis, gen_arr_dis)
return entropy
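# Quick sanity check for KL_divergence on synthetic property values (the numbers
# are invented for illustration): an identical distribution should give ~0, while
# a shifted, wider one gives a strictly positive divergence.
def _kl_divergence_sketch():
    rng = np.random.default_rng(0)
    reference = rng.normal(300.0, 40.0, size=1000)   # e.g. training molecular weights
    generated = rng.normal(330.0, 55.0, size=1000)   # e.g. generated molecular weights
    return KL_divergence(reference, reference), KL_divergence(generated, reference)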
def generate_metric_df():
all_exp_df = pd.read_csv('exp_df_merged.csv')
all_gen_df = pd.read_csv('novel_sampled_merged.csv')
    eval_df = pd.DataFrame()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 by <NAME>
import argparse
import pandas as pd
import numpy as np
from tqdm import trange
def parse_args():
"""
Parse command-line arguments.
"""
parser = argparse.ArgumentParser('Extract user sessions from log.')
parser.add_argument('input', help='an input file')
parser.add_argument('output', help='an output file')
parser.add_argument('-p', '--protocol', default='6', type=str,
help='extract sessions with a specified protocol')
    parser.add_argument('-t', '--maxtime', default=0.01, type=float,
                        help='largest gap between consecutive packets allowed within a session')
parser.add_argument('-d', '--duration', default=0.01, type=float,
help='maximum session duration allowed')
parser.add_argument('-n', '--ipnum', default=0, type=int,
help='number of destination IPs for which sessions will be extracted')
return parser.parse_args()
def split(arr, cond):
return [arr[cond], arr[~cond]]
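# Self-contained sketch (illustrative numbers only) of the gap-based splitting that
# extract_session below applies per destination IP: a packet whose inter-arrival
# time exceeds `maxtime` opens a new session, and np.array_split then yields the
# per-session packet groups whose sizes and durations get aggregated.
def _session_split_sketch(maxtime=0.01):
    ptime = np.array([0.000, 0.002, 0.004, 0.100, 0.103, 0.300])  # packet timestamps
    psize = np.array([40, 1500, 1500, 40, 1500, 40])              # packet sizes
    dtime = np.insert(np.diff(ptime), 0, 0.0)
    ind = np.where(dtime > maxtime)[0]         # packets that open a new session
    sessions = np.array_split(psize, ind)
    return [int(np.sum(s)) for s in sessions]  # bytes per session -> [3040, 1540, 40]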
def extract_session(input_file, output_file, protocol, maxtime, duration, ipnum):
"""
    Read the per-packet connection statistics from the input file and
    write them to the output file, grouped into sessions per destination
    IP address.
    :param input_file: an input file where connection statistics
        are read from
    :param output_file: an output file where grouped statistics
        are written to
    :param protocol: the protocol whose connections are to be considered
    :type protocol: str
    :param maxtime: largest gap between consecutive packets allowed
        within a session
    :param duration: maximum session duration allowed
    :param ipnum: number of destination IPs for which sessions are
        extracted (0 means all)
    :return: the tuple of two numbers: the number of connection
        records processed and the number of grouped records written
        to the specified output file
    :rtype: tuple
"""
# Read data from log
print('Reading data from file ...')
data = pd.read_csv(input_file, sep='\t', header=None, encoding='utf-8',
names=['ptime', 'ipsrc', 'ipdst', 'proto', 'psize'],
dtype={'ptime': float, 'ipsrc': str, 'ipdst': str, 'proto': str, 'psize': int},
compression='gzip')
data = data[data.proto == protocol]
data = data[np.isfinite(data['psize'])]
print('Ranking destination IPs ...')
all_ipdst = pd.Series.value_counts(data.ipdst)
all_ipdst = all_ipdst[all_ipdst > 1]
if ipnum == 0:
ipnum = len(all_ipdst)
all_ipdst = all_ipdst.index
else:
if len(all_ipdst) > ipnum:
all_ipdst = all_ipdst.index[0:ipnum]
else:
all_ipdst = all_ipdst.index
for i in trange(ipnum, desc='Extracting user sessions:'):
data1 = data[data.ipdst == all_ipdst[i]]
data1 = data1.set_index(np.arange(len(data1.ptime)))
dtime = np.diff(np.asarray(data1.ptime))
dtime = np.insert(dtime, 0, 0.0)
ptime1 = data1.iloc[1::]['ptime']
ncon2 = np.ones(len(data1.ptime), dtype=np.int64)
ind = np.asarray(np.where(dtime > maxtime))
ind22 = []
dtime_tmp = 0
for j in range(len(dtime)):
if j in ind:
dtime_tmp = 0
if dtime_tmp > duration:
ind22.append(j-1)
dtime_tmp = dtime[j]
else:
dtime_tmp += dtime[j]
ind22 = np.asarray(ind22)
ind = np.unique(np.sort(np.hstack((ind[0], ind22))))
ind = ind.astype(int)
stime = ptime1[ind]
stime = np.hstack([data1.iloc[0]['ptime'], stime])
if len(ind) != len(data1.ptime)-1:
ptime2 = np.asarray(np.array_split(data1.ptime, ind))
dtime1 = [np.diff(a) for a in ptime2]
else:
dtime1 = np.diff(np.asarray(data1.ptime))
dtime2 = [np.sum(a) for a in dtime1]
ssize2 = np.array(data1.psize)
if len(ind) != len(data1.ptime) - 1:
ssize1 = np.asarray(np.array_split(ssize2, ind))
ssize = [np.sum(a) for a in ssize1]
else:
ssize = ssize2
if ssize[0] == 0:
ssize[0] = ssize2[0]
ncon1 = np.asarray(np.array_split(ncon2, ind))
ncon = [np.sum(a) for a in ncon1]
if ncon[0] == 0:
ncon[0] = ncon2[0]
# else:
# dtime2 = np.sum(dtime)
# ssize = np.sum(data1.psize)
# ncon = np.sum(ncon2)
# for j in range(len(dtime)):
# if dtime[j] < maxtime:
# ssize[l] += data1.iloc[j]['psize']
# ncon[l] += 1
# else:
# l += 1
# ssize[l] = data1.iloc[j]['psize']
stime = pd.Series(stime, name='stime', dtype=np.float64)
sip1 = np.repeat(all_ipdst[i], len(stime))
        sip = pd.Series(sip1, name='ipdst', dtype=str)
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-04T16:00"),
pd.Timestamp("2017-01-05T16:00"),
pd.Timestamp("2017-01-05T16:00")])
ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
assert_series_equal(ac_ts, ac_ts_ex)
def test_get_actions_weekend_filter(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-06T10:30")
new_ts = pd.Timestamp("2017-01-09T16:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-06T16:00"),
pd.Timestamp("2017-01-09T16:00"),
pd.Timestamp("2017-01-09T16:00")])
        ac_ts_ex = pd.Series(["PNL", "INTEREST", "PNL", "INTEREST"], index=idx)
import os
from typing import Union
import math
import pandas as pd
import numpy as np
import sha_calc as sha
from gmhazard_calc import site
from gmhazard_calc import gm_data
from gmhazard_calc import constants as const
from gmhazard_calc.im import IMComponent
from .NZTAResult import NZTAResult
from qcore import geo
# The following CSV file was based on p.147 NZTA Bridge Manual Commentary,
# where, Lat and Lon of each town was obtained from wikipedia (produced by geohack.toolforge.org)
# if the Lat and Lon is in water, a government office location is used instead.
# (eg. regional council for Huntly, Thames, police station for Oban)
# Vs30 values were obtained from Kevin's vs30 map (Release 2020: https://github.com/ucgmsim/Vs30/releases/tag/1)
NZTA_LOOKUP_FFP = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "data", "NZTA_data_lat_lon_vs30.csv"
)
DEFAULT_RETURN_PERIODS = np.array([20, 25, 50, 100, 250, 500, 1000, 2000, 2500])
DEFAULT_EXCEEDANCE_VALUES = 1 / DEFAULT_RETURN_PERIODS
def run_ensemble_nzta(
ensemble: gm_data.Ensemble,
site_info: site.SiteInfo,
exceedance_values: np.ndarray = DEFAULT_EXCEEDANCE_VALUES,
soil_class: const.NZTASoilClass = None,
im_component: IMComponent = IMComponent.RotD50,
):
"""Runs NZTA for the specified site and ensemble
Note:
Parameters
----------
ensemble: Ensemble
The ensemble does not affect calculation at all,
purely included for consistency/completeness
site_info: SiteInfo
The site for which to compute NZTA code hazard
    exceedance_values: array of floats, optional
        The exceedance probabilities (reciprocals of the return periods)
        at which to compute the hazard
soil_class: NZTASoilClass, optional
The soil class to use, if not specified then
this is computed based on the vs30 of the site
Returns
-------
NZTAResult
"""
# Load the required NZTA data
nzta_df = pd.read_csv(NZTA_LOOKUP_FFP, header=0, index_col=0)
soil_class = (
soil_class if soil_class is not None else get_soil_class(site_info.vs30)
)
# Get the return periods
rp_values = 1 / exceedance_values
# Compute PGA and retrieve effective magnitude
C0_1000, nearest_town = get_C0_1000(
site_info.lat, site_info.lon, soil_class, nzta_df=nzta_df
)
pga_values, M_eff = [], None
for cur_rp in rp_values:
cur_pga, M_eff = get_pga_meff(C0_1000, nearest_town, cur_rp, nzta_df=nzta_df)
pga_values.append(cur_pga)
if im_component != IMComponent.Larger:
ratio = sha.get_computed_component_ratio(
str(IMComponent.Larger),
str(im_component),
# Using period of 0.01 for PGA IM
0.01,
)
pga_values = [value * ratio for value in pga_values]
return NZTAResult(
ensemble,
site_info,
soil_class,
pd.Series(index=exceedance_values, data=pga_values),
M_eff,
C0_1000,
nearest_town,
)
def get_C0_1000(
lat: float,
lon: float,
soil_class: const.NZTASoilClass,
nzta_df: pd.DataFrame = None,
):
"""
Returns
-------
    1. C_0,1000 value for the given soil class at the closest location
2. the name of the closest town
"""
nzta_df = (
pd.read_csv(NZTA_LOOKUP_FFP, header=0, index_col=0)
if nzta_df is None
else nzta_df
)
town, _ = __location_lookup(lat, lon, nzta_df)
if soil_class is const.NZTASoilClass.rock:
return nzta_df["C_0_1000_AB"].loc[town], town
else:
return nzta_df["C_0_1000_DE"].loc[town], town
def get_pga_meff(
C0_1000: float,
town: str,
RP: Union[int, float],
nzta_df: pd.DataFrame = None,
):
"""
Returns
-------
float/NaN:
PGA computed based on c_0,1000 value for the given location and return period
if C_0,1000 is not 0 else NaN
float:
Effective magnitudes for design return period (years)
"""
nzta_df = (
        pd.read_csv(NZTA_LOOKUP_FFP, header=0, index_col=0)
import geopandas as gpd
import pandas as pd
from shapely.geometry import Polygon,Point
import math
import numpy as np
def rect_grids(bounds,accuracy = 500):
'''
Generate the rectangular grids in the bounds
Parameters
-------
bounds : List
Create the bounds, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the lower-left coordinates, lon2 , lat2 are the upper-right coordinates
accuracy : number
Grid size (meter)
Returns
-------
grid : GeoDataFrame
Grids’ GeoDataFrame, LONCOL and LATCOL are the index of grids, HBLON and HBLAT are the center of the grids
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
latStart = min(lat1, lat2);
lonStart = min(lon1, lon2);
deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360));
deltaLat = accuracy * 360 / (2 * math.pi * 6371004);
data = gpd.GeoDataFrame()
LONCOL_list = []
LATCOL_list = []
geometry_list = []
HBLON_list = []
HBLAT_list = []
lonsnum = int((lon2-lon1)/deltaLon)+1
latsnum = int((lat2-lat1)/deltaLat)+1
for i in range(lonsnum):
for j in range(latsnum):
HBLON = i*deltaLon + lonStart
HBLAT = j*deltaLat + latStart
HBLON_1 = (i+1)*deltaLon + lonStart
HBLAT_1 = (j+1)*deltaLat + latStart
grid_ij = Polygon([
(HBLON-deltaLon/2,HBLAT-deltaLat/2),
(HBLON_1-deltaLon/2,HBLAT-deltaLat/2),
(HBLON_1-deltaLon/2,HBLAT_1-deltaLat/2),
(HBLON-deltaLon/2,HBLAT_1-deltaLat/2)])
LONCOL_list.append(i)
LATCOL_list.append(j)
HBLON_list.append(HBLON)
HBLAT_list.append(HBLAT)
geometry_list.append(grid_ij)
data['LONCOL'] = LONCOL_list
data['LATCOL'] = LATCOL_list
data['HBLON'] = HBLON_list
data['HBLAT'] = HBLAT_list
data['geometry'] = geometry_list
params = (lonStart,latStart,deltaLon,deltaLat)
return data,params
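# Usage sketch for rect_grids with the same bounds as the grid_params docstring
# below; at 500 m this produces a GeoDataFrame of roughly 250 x 110 cells, so the
# call is kept inside a function rather than executed at import time.
def _rect_grids_sketch():
    bounds = [113.6, 22.4, 114.8, 22.9]
    grid, params = rect_grids(bounds, accuracy=500)
    # Each row carries the integer cell indices (LONCOL, LATCOL), the cell centre
    # (HBLON, HBLAT) and the cell polygon in the `geometry` column.
    return grid.head(), params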
def grid_params(bounds,accuracy = 500):
'''
Generate gridding params
Parameters
-------
bounds : List
Bounds of the study area, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the lower-left coordinates, lon2 , lat2 are the upper-right coordinates
accuracy : number
Grid size (meter)
Returns
-------
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Examples
-------
>>> import transbigdata as tbd
>>> bounds = [113.6,22.4,114.8,22.9]
>>> tbd.grid_params(bounds,accuracy = 500)
(113.6, 22.4, 0.004872390756896538, 0.004496605206422906)
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
latStart = min(lat1, lat2);
lonStart = min(lon1, lon2);
deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360));
deltaLat = accuracy * 360 / (2 * math.pi * 6371004);
return (lonStart,latStart,deltaLon,deltaLat)
def GPS_to_grids(lon,lat,params):
'''
Match the GPS data to the grids. The input is the columns of longitude, latitude, and the grids parameter. The output is the grid ID.
Parameters
-------
lon : Series
The column of longitude
lat : Series
The column of latitude
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Returns
-------
LONCOL : Series
The index of the grid longitude. The two columns LONCOL and LATCOL together can specify a grid.
LATCOL : Series
The index of the grid latitude. The two columns LONCOL and LATCOL together can specify a grid.
'''
(lonStart,latStart,deltaLon,deltaLat) = params
loncol = np.floor(((lon - (lonStart - deltaLon / 2))/deltaLon)).astype('int')
latcol = np.floor(((lat - (latStart - deltaLat / 2))/deltaLat)).astype('int')
return loncol,latcol
def grids_centre(loncol,latcol,params):
'''
The center location of the grid. The input is the grid ID and parameters, the output is the grid center location.
Parameters
-------
LONCOL : Series
The index of the grid longitude. The two columns LONCOL and LATCOL together can specify a grid.
LATCOL : Series
The index of the grid latitude. The two columns LONCOL and LATCOL together can specify a grid.
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Returns
-------
HBLON : Series
The longitude of the grid center
HBLAT : Series
The latitude of the grid center
'''
(lonStart,latStart,deltaLon,deltaLat) = params
hblon = loncol*deltaLon + lonStart
hblat = latcol*deltaLat + latStart
return hblon,hblat
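# Round-trip sketch combining grid_params, GPS_to_grids and grids_centre: a point
# is snapped to its cell indices and mapped back to the cell centre, which must lie
# within half a cell of the original coordinate. The coordinates reuse the
# illustrative Shenzhen bounds from the docstrings above.
def _grid_roundtrip_sketch():
    params = grid_params([113.6, 22.4, 114.8, 22.9], accuracy=500)
    lon = pd.Series([113.75, 114.12])
    lat = pd.Series([22.55, 22.70])
    loncol, latcol = GPS_to_grids(lon, lat, params)
    hblon, hblat = grids_centre(loncol, latcol, params)
    assert (abs(hblon - lon) <= params[2] / 2).all()
    assert (abs(hblat - lat) <= params[3] / 2).all()
    return loncol, latcol, hblon, hblat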
def gridid_to_polygon(loncol,latcol,params):
'''
Generate the geometry column based on the grid ID. The input is the grid ID, the output is the geometry.
Parameters
-------
LONCOL : Series
The index of the grid longitude. The two columns LONCOL and LATCOL together can specify a grid.
LATCOL : Series
The index of the grid latitude. The two columns LONCOL and LATCOL together can specify a grid.
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Returns
-------
geometry : Series
The column of grid geographic polygon
'''
(lonStart,latStart,deltaLon,deltaLat) = params
HBLON = loncol*deltaLon + lonStart
HBLAT = latcol*deltaLat + latStart
HBLON_1 = (loncol+1)*deltaLon + lonStart
HBLAT_1 = (latcol+1)*deltaLat + latStart
df = pd.DataFrame()
df['HBLON'] = HBLON
df['HBLAT'] = HBLAT
df['HBLON_1'] = HBLON_1
df['HBLAT_1'] = HBLAT_1
return df.apply(lambda r:Polygon([
(r['HBLON']-deltaLon/2,r['HBLAT']-deltaLat/2),
(r['HBLON_1']-deltaLon/2,r['HBLAT']-deltaLat/2),
(r['HBLON_1']-deltaLon/2,r['HBLAT_1']-deltaLat/2),
(r['HBLON']-deltaLon/2,r['HBLAT_1']-deltaLat/2)]),axis = 1)
def hexagon_grids(bounds,accuracy = 500):
'''
Generate hexagonal grids in the bounds
Parameters
-------
bounds : List
Create the bounds, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the lower-left coordinates, lon2 , lat2 are the upper-right coordinates
accuracy : number
Side length of hexagon (m)
Returns
-------
hexagon : GeoDataFrame
hexagon grid’s geographic polygon
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
latStart = min(lat1, lat2);
lonStart = min(lon1, lon2);
latEnd = max(lat1, lat2);
lonEnd = max(lon1, lon2);
origin = gpd.GeoDataFrame([Point(lonStart,latStart),Point(lonEnd,latEnd)],columns = ['geometry'])
origin.crs = {'init':'epsg:4326'}
origin = origin.to_crs(epsg = 3857)
x_o = origin['geometry'].iloc[0].x
y_o = origin['geometry'].iloc[0].y
x_d = origin['geometry'].iloc[1].x
y_d = origin['geometry'].iloc[1].y
lonsnum = (x_d-x_o)/accuracy
latsnum = (y_d-y_o)/accuracy
#1
xs = np.arange(0,lonsnum,3)
ys = np.arange(0,latsnum,2*(3/4)**0.5)
xs = pd.DataFrame(xs,columns = ['x'])
xs['tmp'] = 1
ys = pd.DataFrame(ys,columns = ['y'])
ys['tmp'] = 1
df1 = pd.merge(xs,ys)
#2
xs = np.arange(1.5,lonsnum,3)
ys = np.arange((3/4)**0.5,latsnum,2*(3/4)**0.5)
xs = pd.DataFrame(xs,columns = ['x'])
xs['tmp'] = 1
ys = pd.DataFrame(ys,columns = ['y'])
ys['tmp'] = 1
df2 = pd.merge(xs,ys)
    df = pd.concat([df1,df2])
import numpy as np
import pandas as pd
from fox_toolbox.utils.rates import Curve, RateCurve, Swap, Swaption, Volatility
from collections import namedtuple
swap_rate_model = namedtuple('swap_rate_model', 'mtype a b neff')
cms_result = namedtuple('cms_result', 'swap_fwd disc_Tf_Tp')
csvCMSFlow = namedtuple('csvCMSFlow', 'CMS_length vol fixing_date pmnt_date strike model n strike_min strike_max callput calib_basket fwd discTfTp')
csvSwaplet = namedtuple('csvSwaplet', 'swaplet disc_Tf_Tp strike caplet floorlet adjCMSrate')
pricingCMSflow = namedtuple('pricingCMSflow', 'caplet floorlet swaplet')
def get_calib_basket(one_column_df):
tsr_columns = ['Index','Strike','RepFwd','Weights','Disc/A','Vol','AdjVol', 'SwoPrice', 'AdjSwoPrice', 'Vega']
    cal_basket = pd.DataFrame(columns=tsr_columns)
import numpy as np
import os
import pickle
import scipy.sparse as sp
from pathlib import Path
import wget
import pandas as pd
import torch
def get_project_root() -> Path:
return Path(__file__).parent.parent
PROJECT_ROOT = get_project_root()
def double_transition_matrix(adj_mx):
supports = []
supports.append(torch.tensor(
calculate_random_walk_matrix(adj_mx).T))
supports.append(torch.tensor(
calculate_random_walk_matrix(adj_mx.T).T))
return supports
def calculate_random_walk_matrix(adj_mx):
d = np.array(adj_mx.sum(1))
d_inv = np.power(d, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
random_walk_mx = d_mat_inv.dot(adj_mx)
return random_walk_mx
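# Small sanity-check sketch for calculate_random_walk_matrix: on a toy 3-node
# adjacency matrix (made up for illustration) the result is the row-normalised
# transition matrix D^-1 A, so every row with at least one edge sums to 1.
def _random_walk_sketch():
    adj = np.array([[0., 1., 1.],
                    [1., 0., 0.],
                    [1., 0., 0.]])
    walk = calculate_random_walk_matrix(adj)
    row_sums = np.asarray(walk.sum(axis=1)).ravel()
    return np.allclose(row_sums, 1.0)  # True: every node here has a neighbour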
def _exist_dataset_on_disk(dataset):
file = f'{PROJECT_ROOT}/data/METR-LA.csv' if dataset == 'la' else f'{PROJECT_ROOT}/data/PEMS-BAY.csv'
return os.path.isfile(file)
def split_data(df, rule=[0.7, 0.1, 0.2]):
assert np.isclose(np.sum(
rule), 1.0), f"sum of split rule should be 1 (currently sum={np.sum(rule):.2f})"
num_samples = df.shape[0]
num_test = round(num_samples * rule[-1])
num_train = round(num_samples * rule[0])
num_val = num_samples - num_test - num_train
train_df = df.iloc[:num_train].copy()
valid_df = df.iloc[num_train: num_train + num_val].copy()
test_df = df.iloc[-num_test:].copy()
return train_df, valid_df, test_df
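# Minimal illustration of split_data with the default 70/10/20 rule on a tiny
# hypothetical frame: 10 chronologically ordered rows split into 7 / 1 / 2 parts.
def _split_data_sketch():
    df = pd.DataFrame({"sensor_0": np.arange(10)},
                      index=pd.date_range("2021-01-01", periods=10, freq="5min"))
    train_df, valid_df, test_df = split_data(df)
    return len(train_df), len(valid_df), len(test_df)  # (7, 1, 2)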
def get_traffic_data(dataset, null_value=0.0):
if dataset == 'la':
fn, adj_name = 'METR-LA.csv', 'adj_mx_METR-LA.pkl'
elif dataset == 'bay':
fn, adj_name = 'PEMS-BAY.csv', 'adj_mx_PEMS-BAY.pkl'
else:
raise ValueError("dataset name should be either 'bay' or 'la")
data_url = f'https://zenodo.org/record/5724362/files/{fn}'
sup_url = f'https://zenodo.org/record/5724362/files/{adj_name}'
if not _exist_dataset_on_disk(dataset):
wget.download(data_url, out=f'{PROJECT_ROOT}/data')
wget.download(sup_url, out=f'{PROJECT_ROOT}/data')
df = pd.read_csv(f'{PROJECT_ROOT}/data/{fn}', index_col=0)
df.index = pd.DatetimeIndex(df.index)
dt = pd.Timedelta(df.index.to_series().diff().mode().values[0])
df = df.asfreq(freq=dt, fill_value=null_value)
df = df.replace(0.0, null_value)
with open(f'{PROJECT_ROOT}/data/{adj_name}', 'rb') as f:
_, _, adj = pickle.load(f, encoding='latin1')
return df, adj
def convert_timestamp_to_feature(timestamp):
hour, minute = timestamp.hour, timestamp.minute
feature = (hour * 60 + minute) / (24 * 60)
    return pd.DataFrame(feature, index=timestamp)
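# Example of the time-of-day encoding above: midnight maps to 0.0, noon to 0.5 and
# 18:30 to roughly 0.77, i.e. the feature is the fraction of the day elapsed.
def _time_feature_sketch():
    idx = pd.DatetimeIndex(["2021-01-01 00:00",
                            "2021-01-01 12:00",
                            "2021-01-01 18:30"])
    return convert_timestamp_to_feature(idx)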
# =============================================================================
# Imports
# =============================================================================
# Standard
import argparse
import os
import sys
import glob
import math
import pandas as pd
import json
import numpy as np
import datetime
# =============================================================================
# Constants
# =============================================================================
COUNTRIES = [
"European Union",
"Austria",
"Belgium",
"Bulgaria",
"Croatia",
"Cyprus",
"Czechia",
"Denmark",
"Estonia",
"Finland",
"France",
"Germany",
"Greece",
"Hungary",
"Ireland",
"Italy",
"Latvia",
"Lithuania",
"Luxembourg",
"Malta",
"Netherlands",
"Poland",
"Portugal",
"Romania",
"Slovakia",
"Slovenia",
"Spain",
"Sweden",
"Norway",
"Iceland",
"Switzerland",
"United Kingdom",
"United States",
]
COUNTRIES_WITHOUT_FULL_DATA = [
"Luxembourg"
]
# =============================================================================
# Functions
# =============================================================================
def get_population(path, country):
"""Get the population for a country."""
# Use the country to identify the population data
index_col = "entity"
data = pd.read_csv(path, index_col=index_col)
return data.loc[country]["population"].values[0]
def get_data_hundred_people(data, path):
"""Get the vaccination data per hundred people."""
# Filter the numerical data to avoid errors
population = get_population(path, data["location"])
numeric_columns = data.select_dtypes("number").columns.tolist()
data[numeric_columns] = data[numeric_columns] * 100 / population
return data
def get_data_hundred_adults(data, path):
"""Get the vaccination data per hundred people."""
# Filter the numerical data to avoid errors
population = get_population(path, data["location"])
numeric_columns = data.select_dtypes("number").columns.tolist()
data[numeric_columns] = data[numeric_columns] * 100 / population
return data
def get_days_to_70(data, parameter):
"""Get the rolling average of the vaccination data."""
# Use one period for the rolling average
periods = 1
days = 7
    # keep days + 1 so that diff can compare with the last day out of the average
data_for_average = data.tail(days + 1)
data_for_average.reset_index(inplace=True)
data_for_average["date"] = pd.to_datetime(
data_for_average["date"], format='%Y-%m-%d')
# data_for_average.iloc[-1]["date"] - data_for_average.iloc[0]["date"]
date_limit = data_for_average.iloc[-1]["date"] - \
datetime.timedelta(days=days + 1)
data_for_average = data_for_average[data_for_average["date"] > date_limit]
data_for_average = data_for_average[parameter]
data_for_average = data_for_average.dropna() # remove empty rows for diff()
difference = data_for_average.diff(periods)
seven_days_average = difference.sum() / days
return seven_days_average
def get_week_on_week(data, parameter):
"""Get the rolling average of the vaccination data."""
# if(data["location"].iloc[-1] == "Hungary"):
# return 0
# Use one period for the rolling average
periods = 1
days = 7
    # keep days + 1 so that diff can compare with the last day out of the average
data_for_average = data.tail(days + 1)
data_for_average.reset_index(inplace=True)
data_for_average["date"] = pd.to_datetime(
data_for_average["date"], format='%Y-%m-%d')
# data_for_average.iloc[-1]["date"] - data_for_average.iloc[0]["date"]
date_limit = data_for_average.iloc[-1]["date"] - \
datetime.timedelta(days=days + 1)
data_for_average = data_for_average[data_for_average["date"] > date_limit]
data_for_average = data_for_average[parameter]
data_for_average = data_for_average.dropna() # remove empty rows for diff()
try:
difference = data_for_average.iloc[-1] - data_for_average.iloc[0]
except IndexError:
print("not available")
difference = 0
return difference
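# Worked sketch of the days-to-70 arithmetic used in read_data below: on a
# synthetic cumulative series (figures invented, already expressed per hundred
# people) the 7-day average daily gain is recovered by get_days_to_70, and
# (70 - current share) / gain then gives the projected number of days.
def _days_to_70_sketch():
    dates = pd.date_range("2021-06-01", periods=10, freq="D")
    df = pd.DataFrame({
        "date": dates.strftime("%Y-%m-%d"),
        "people_fully_vaccinated": np.linspace(50.0, 54.5, 10),  # +0.5 pp per day
    })
    daily_gain = get_days_to_70(df, "people_fully_vaccinated")   # ~0.5
    days_left = round((70 - df["people_fully_vaccinated"].iloc[-1]) / daily_gain)
    return daily_gain, days_left  # roughly (0.5, 31)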
def read_data(path, path_population, path_adults):
"""Read the last vaccination data for all countries."""
files = glob.glob(path + "*.csv")
def read_csv(file):
data = pd.read_csv(file)
print(file)
print(data)
data["people_vaccinated"].fillna(
data["people_fully_vaccinated"], inplace=True)
data = data.ffill()
data_adults = data.copy()
data["days_to_70"] = get_days_to_70(data, "people_fully_vaccinated")
data["week_on_week"] = get_week_on_week(
data, "people_fully_vaccinated")
data = data.iloc[[-1]]
data_adults = data_adults.iloc[[-1]]
data = get_data_hundred_people(data, path_population)
data_adults = get_data_hundred_adults(data_adults, path_adults)
data["days_to_70"] = round(
(70 - data["people_fully_vaccinated"]) / data["days_to_70"], 0)
data["adults_fully_vaccinated"] = data_adults["people_fully_vaccinated"]
data["adults_vaccinated"] = data_adults["people_vaccinated"]
return data
data = pd.concat(map(read_csv, files))
columns = ["date", "location", "people_vaccinated",
"people_fully_vaccinated", "total_vaccinations",
"adults_fully_vaccinated", "adults_vaccinated",
"days_to_70", "week_on_week", "total_boosters"]
data = data[columns]
return data.set_index("location").round(1)
def read_data_past(path, path_population, path_adults):
files = glob.glob(path + "*.csv")
def read_csv(file):
        data_prev = pd.read_csv(file)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
import nose
import numpy as np
from numpy import nan
import pandas as pd
from distutils.version import LooseVersion
from pandas import (Index, Series, DataFrame, Panel, isnull,
date_range, period_range)
from pandas.core.index import MultiIndex
import pandas.core.common as com
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assert_panel_equal,
assert_equal)
import pandas.util.testing as tm
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
raise nose.SkipTest('scipy.interpolate.pchip missing')
# ----------------------------------------------------------------------
# Generic types test cases
class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
pass
@property
def _ndim(self):
return self._typ._AXIS_LEN
def _axes(self):
""" return the axes for my object typ """
return self._typ._AXIS_ORDERS
def _construct(self, shape, value=None, dtype=None, **kwargs):
""" construct an object for the given shape
            if value is specified use that if it's a scalar
if value is an array, repeat it as needed """
if isinstance(shape, int):
shape = tuple([shape] * self._ndim)
if value is not None:
if np.isscalar(value):
if value == 'empty':
arr = None
# remove the info axis
kwargs.pop(self._typ._info_axis_name, None)
else:
arr = np.empty(shape, dtype=dtype)
arr.fill(value)
else:
fshape = np.prod(shape)
arr = value.ravel()
new_shape = fshape / arr.shape[0]
if fshape % arr.shape[0] != 0:
raise Exception("invalid value passed in _construct")
arr = np.repeat(arr, new_shape).reshape(shape)
else:
arr = np.random.randn(*shape)
return self._typ(arr, dtype=dtype, **kwargs)
def _compare(self, result, expected):
self._comparator(result, expected)
def test_rename(self):
# single axis
for axis in self._axes():
kwargs = {axis: list('ABCD')}
obj = self._construct(4, **kwargs)
# no values passed
# self.assertRaises(Exception, o.rename(str.lower))
# rename a single axis
result = obj.rename(**{axis: str.lower})
expected = obj.copy()
setattr(expected, axis, list('abcd'))
self._compare(result, expected)
# multiple axes at once
def test_get_numeric_data(self):
n = 4
kwargs = {}
for i in range(self._ndim):
kwargs[self._typ._AXIS_NAMES[i]] = list(range(n))
# get the numeric data
o = self._construct(n, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
# non-inclusion
result = o._get_bool_data()
expected = self._construct(n, value='empty', **kwargs)
self._compare(result, expected)
# get the bool data
arr = np.array([True, True, False, True])
o = self._construct(n, value=arr, **kwargs)
result = o._get_numeric_data()
self._compare(result, o)
        # _get_numeric_data includes _get_bool_data, so can't test for
# non-inclusion
def test_get_default(self):
# GH 7725
d0 = "a", "b", "c", "d"
d1 = np.arange(4, dtype='int64')
others = "e", 10
for data, index in ((d0, d1), (d1, d0)):
s = Series(data, index=index)
for i, d in zip(index, data):
self.assertEqual(s.get(i), d)
self.assertEqual(s.get(i, d), d)
self.assertEqual(s.get(i, "z"), d)
for other in others:
self.assertEqual(s.get(other, "z"), "z")
self.assertEqual(s.get(other, other), other)
def test_nonzero(self):
# GH 4633
# look at the boolean/nonzero behavior for objects
obj = self._construct(shape=4)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=1)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
obj = self._construct(shape=4, value=np.nan)
self.assertRaises(ValueError, lambda: bool(obj == 0))
self.assertRaises(ValueError, lambda: bool(obj == 1))
self.assertRaises(ValueError, lambda: bool(obj))
# empty
obj = self._construct(shape=0)
self.assertRaises(ValueError, lambda: bool(obj))
# invalid behaviors
obj1 = self._construct(shape=4, value=1)
obj2 = self._construct(shape=4, value=1)
def f():
if obj1:
com.pprint_thing("this works and shouldn't")
self.assertRaises(ValueError, f)
self.assertRaises(ValueError, lambda: obj1 and obj2)
self.assertRaises(ValueError, lambda: obj1 or obj2)
self.assertRaises(ValueError, lambda: not obj1)
def test_numpy_1_7_compat_numeric_methods(self):
# GH 4435
        # numpy in 1.7 tries to pass additional arguments to pandas functions
o = self._construct(shape=4)
for op in ['min', 'max', 'max', 'var', 'std', 'prod', 'sum', 'cumsum',
'cumprod', 'median', 'skew', 'kurt', 'compound', 'cummax',
'cummin', 'all', 'any']:
f = getattr(np, op, None)
if f is not None:
f(o)
def test_downcast(self):
# test close downcasting
o = self._construct(shape=4, value=9, dtype=np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
o = self._construct(shape=4, value=9.)
expected = o.astype(np.int64)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, expected)
o = self._construct(shape=4, value=9.5)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
self._compare(result, o)
# are close
o = self._construct(shape=4, value=9.000000000005)
result = o.copy()
result._data = o._data.downcast(dtypes='infer')
expected = o.astype(np.int64)
self._compare(result, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
return self._construct(shape=3, dtype=dtype)
self.assertRaises(NotImplementedError, f, [("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
f('M8[ns]')
def check_metadata(self, x, y=None):
for m in x._metadata:
v = getattr(x, m, None)
if y is None:
self.assertIsNone(v)
else:
self.assertEqual(v, getattr(y, m, None))
def test_metadata_propagation(self):
# check that the metadata matches up on the resulting ops
o = self._construct(shape=3)
o.name = 'foo'
o2 = self._construct(shape=3)
o2.name = 'bar'
# TODO
# Once panel can do non-trivial combine operations
        # (currently there is a raise in the Panel arith_ops to prevent
# this, though it actually does work)
# can remove all of these try: except: blocks on the actual operations
# ----------
# preserving
# ----------
# simple ops with scalars
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
result = getattr(o, op)(1)
self.check_metadata(o, result)
# ops with like
for op in ['__add__', '__sub__', '__truediv__', '__mul__']:
try:
result = getattr(o, op)(o)
self.check_metadata(o, result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
v1 = getattr(o, op)(o)
self.check_metadata(o, v1)
try:
self.check_metadata(o, v1 & v1)
except (ValueError):
pass
try:
self.check_metadata(o, v1 | v1)
except (ValueError):
pass
# combine_first
try:
result = o.combine_first(o2)
self.check_metadata(o, result)
except (AttributeError):
pass
# ---------------------------
# non-preserving (by default)
# ---------------------------
# add non-like
try:
result = o + o2
self.check_metadata(result)
except (ValueError, AttributeError):
pass
# simple boolean
for op in ['__eq__', '__le__', '__ge__']:
# this is a name matching op
v1 = getattr(o, op)(o)
v2 = getattr(o, op)(o2)
self.check_metadata(v2)
try:
self.check_metadata(v1 & v2)
except (ValueError):
pass
try:
self.check_metadata(v1 | v2)
except (ValueError):
pass
def test_head_tail(self):
# GH5370
o = self._construct(shape=10)
# check all index types
for index in [tm.makeFloatIndex, tm.makeIntIndex, tm.makeStringIndex,
tm.makeUnicodeIndex, tm.makeDateIndex,
tm.makePeriodIndex]:
axis = o._get_axis_name(0)
setattr(o, axis, index(len(getattr(o, axis))))
# Panel + dims
try:
o.head()
except (NotImplementedError):
raise nose.SkipTest('not implemented on {0}'.format(
o.__class__.__name__))
self._compare(o.head(), o.iloc[:5])
self._compare(o.tail(), o.iloc[-5:])
# 0-len
self._compare(o.head(0), o.iloc[0:0])
self._compare(o.tail(0), o.iloc[0:0])
# bounded
self._compare(o.head(len(o) + 1), o)
self._compare(o.tail(len(o) + 1), o)
# neg index
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
def test_sample(self):
# Fixes issue: 2419
o = self._construct(shape=10)
###
# Check behavior of random_state argument
###
        # Check for stability when it receives a seed or random state -- run 10
# times.
for test in range(10):
seed = np.random.randint(0, 100)
self._compare(
o.sample(n=4, random_state=seed), o.sample(n=4,
random_state=seed))
self._compare(
o.sample(frac=0.7, random_state=seed), o.sample(
frac=0.7, random_state=seed))
self._compare(
o.sample(n=4, random_state=np.random.RandomState(test)),
o.sample(n=4, random_state=np.random.RandomState(test)))
self._compare(
o.sample(frac=0.7, random_state=np.random.RandomState(test)),
o.sample(frac=0.7, random_state=np.random.RandomState(test)))
# Check for error when random_state argument invalid.
with tm.assertRaises(ValueError):
o.sample(random_state='astring!')
###
# Check behavior of `frac` and `N`
###
# Giving both frac and N throws error
with tm.assertRaises(ValueError):
o.sample(n=3, frac=0.3)
# Check that raises right error for negative lengths
with tm.assertRaises(ValueError):
o.sample(n=-3)
with tm.assertRaises(ValueError):
o.sample(frac=-0.3)
# Make sure float values of `n` give error
with tm.assertRaises(ValueError):
o.sample(n=3.2)
# Check lengths are right
self.assertTrue(len(o.sample(n=4) == 4))
self.assertTrue(len(o.sample(frac=0.34) == 3))
self.assertTrue(len(o.sample(frac=0.36) == 4))
###
# Check weights
###
# Weight length must be right
with tm.assertRaises(ValueError):
o.sample(n=3, weights=[0, 1])
with tm.assertRaises(ValueError):
bad_weights = [0.5] * 11
o.sample(n=3, weights=bad_weights)
with tm.assertRaises(ValueError):
bad_weight_series = Series([0, 0, 0.2])
o.sample(n=4, weights=bad_weight_series)
# Check won't accept negative weights
with tm.assertRaises(ValueError):
bad_weights = [-0.1] * 10
o.sample(n=3, weights=bad_weights)
# Check inf and -inf throw errors:
with tm.assertRaises(ValueError):
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
o.sample(n=3, weights=weights_with_inf)
with tm.assertRaises(ValueError):
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
o.sample(n=3, weights=weights_with_ninf)
# All zeros raises errors
zero_weights = [0] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=zero_weights)
# All missing weights
nan_weights = [np.nan] * 10
with tm.assertRaises(ValueError):
o.sample(n=3, weights=nan_weights)
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10,
'easyweights': easy_weight_list})
sample1 = df.sample(n=1, weights='easyweights')
assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series, panel, or
# DataFrame with axis = 1.
s = Series(range(10))
with tm.assertRaises(ValueError):
s.sample(n=3, weights='weight_column')
panel = pd.Panel(items=[0, 1, 2], major_axis=[2, 3, 4],
minor_axis=[3, 4, 5])
with tm.assertRaises(ValueError):
panel.sample(n=1, weights='weight_column')
with tm.assertRaises(ValueError):
df.sample(n=1, weights='weight_column', axis=1)
# Check weighting key error
with tm.assertRaises(KeyError):
df.sample(n=3, weights='not_a_real_column_name')
# Check np.nan are replaced by zeros.
weights_with_nan = [np.nan] * 10
weights_with_nan[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_nan), o.iloc[5:6])
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
self._compare(
o.sample(n=1, axis=0, weights=weights_with_None), o.iloc[5:6])
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(
df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
###
# Test axis argument
###
# Test axis argument
df = pd.DataFrame({'col1': range(10), 'col2': ['a'] * 10})
second_column_weight = [0, 1]
assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[['col2']])
# Different axis arg types
assert_frame_equal(df.sample(n=1, axis='columns',
weights=second_column_weight),
df[['col2']])
weight = [0] * 10
weight[5] = 0.5
assert_frame_equal(df.sample(n=1, axis='rows', weights=weight),
df.iloc[5:6])
assert_frame_equal(df.sample(n=1, axis='index', weights=weight),
df.iloc[5:6])
# Check out of range axis values
with tm.assertRaises(ValueError):
df.sample(n=1, axis=2)
with tm.assertRaises(ValueError):
df.sample(n=1, axis='not_a_name')
with tm.assertRaises(ValueError):
s = pd.Series(range(10))
s.sample(n=1, axis=1)
# Test weight length compared to correct axis
with tm.assertRaises(ValueError):
df.sample(n=1, axis=1, weights=[0.5] * 10)
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = pd.DataFrame({'col1': range(10, 20),
'col2': range(20, 30),
'colString': ['a'] * 10})
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
assert_frame_equal(sample1, df[['colString']])
# Test default axes
p = pd.Panel(items=['a', 'b', 'c'], major_axis=[2, 4, 6],
minor_axis=[1, 3, 5])
assert_panel_equal(
p.sample(n=3, random_state=42), p.sample(n=3, axis=1,
random_state=42))
assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0,
random_state=42))
# Test that function aligns weights with frame
df = DataFrame(
{'col1': [5, 6, 7],
'col2': ['a', 'b', 'c'], }, index=[9, 5, 3])
s = Series([1, 0, 0], index=[3, 5, 9])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s))
# Weights have index values to be dropped because not in
# sampled DataFrame
s2 = Series([0.001, 0, 10000], index=[3, 5, 10])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s2))
# Weights have empty values to be filed with zeros
s3 = Series([0.01, 0], index=[3, 5])
assert_frame_equal(df.loc[[3]], df.sample(1, weights=s3))
# No overlap in weight and sampled DataFrame indices
s4 = Series([1, 0], index=[1, 2])
with tm.assertRaises(ValueError):
df.sample(1, weights=s4)
def test_size_compat(self):
# GH8846
# size property should be defined
o = self._construct(shape=10)
self.assertTrue(o.size == np.prod(o.shape))
self.assertTrue(o.size == 10 ** len(o.axes))
def test_split_compat(self):
# xref GH8846
o = self._construct(shape=10)
self.assertTrue(len(np.array_split(o, 5)) == 5)
self.assertTrue(len(np.array_split(o, 2)) == 2)
def test_unexpected_keyword(self): # GH8597
from pandas.util.testing import assertRaisesRegexp
df = DataFrame(np.random.randn(5, 2), columns=['jim', 'joe'])
ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
ts = df['joe'].copy()
ts[2] = np.nan
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.drop('joe', axis=1, in_place=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
df.reindex([1, 0], inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ca.fillna(0, inplace=True)
with assertRaisesRegexp(TypeError, 'unexpected keyword'):
ts.fillna(0, in_place=True)
class TestSeries(tm.TestCase, Generic):
_typ = Series
_comparator = lambda self, x, y: assert_series_equal(x, y)
def setUp(self):
self.ts = tm.makeTimeSeries() # Was at top level in test_series
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
def test_rename_mi(self):
s = Series([11, 21, 31],
index=MultiIndex.from_tuples(
[("A", x) for x in ["a", "B", "c"]]))
s.rename(str.lower)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = Series([1, 2, 3])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([1, '2', 3.])
result = o._get_numeric_data()
expected = Series([], dtype=object, index=pd.Index([], dtype=object))
self._compare(result, expected)
o = Series([True, False, True])
result = o._get_numeric_data()
self._compare(result, o)
o = Series([True, False, True])
result = o._get_bool_data()
self._compare(result, o)
o = Series(date_range('20130101', periods=3))
result = o._get_numeric_data()
expected = Series([], dtype='M8[ns]', index=pd.Index([], dtype=object))
self._compare(result, expected)
def test_nonzero_single_element(self):
# allow single item via bool method
s = Series([True])
self.assertTrue(s.bool())
s = Series([False])
self.assertFalse(s.bool())
# single item nan to raise
for s in [Series([np.nan]), Series([pd.NaT]), Series([True]),
Series([False])]:
self.assertRaises(ValueError, lambda: bool(s))
for s in [Series([np.nan]), Series([pd.NaT])]:
self.assertRaises(ValueError, lambda: s.bool())
# multiple bool are still an error
for s in [Series([True, True]), Series([False, False])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
# single non-bool are an error
for s in [Series([1]), Series([0]), Series(['a']), Series([0.0])]:
self.assertRaises(ValueError, lambda: bool(s))
self.assertRaises(ValueError, lambda: s.bool())
def test_metadata_propagation_indiv(self):
# check that the metadata matches up on the resulting ops
o = Series(range(3), range(3))
o.name = 'foo'
o2 = Series(range(3), range(3))
o2.name = 'bar'
result = o.T
self.check_metadata(o, result)
# resample
ts = Series(np.random.rand(1000),
index=date_range('20130101', periods=1000, freq='s'),
name='foo')
result = ts.resample('1T').mean()
self.check_metadata(ts, result)
result = ts.resample('1T').min()
self.check_metadata(ts, result)
result = ts.resample('1T').apply(lambda x: x.sum())
self.check_metadata(ts, result)
_metadata = Series._metadata
_finalize = Series.__finalize__
Series._metadata = ['name', 'filename']
o.filename = 'foo'
o2.filename = 'bar'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat' and name == 'filename':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
Series.__finalize__ = finalize
result = pd.concat([o, o2])
self.assertEqual(result.filename, 'foo+bar')
self.assertIsNone(result.name)
# reset
Series._metadata = _metadata
Series.__finalize__ = _finalize
def test_interpolate(self):
ts = Series(np.arange(len(self.ts), dtype=float), self.ts.index)
ts_copy = ts.copy()
ts_copy[5:10] = np.NaN
linear_interp = ts_copy.interpolate(method='linear')
self.assert_numpy_array_equal(linear_interp, ts)
ord_ts = Series([d.toordinal() for d in self.ts.index],
index=self.ts.index).astype(float)
ord_ts_copy = ord_ts.copy()
ord_ts_copy[5:10] = np.NaN
time_interp = ord_ts_copy.interpolate(method='time')
self.assert_numpy_array_equal(time_interp, ord_ts)
# try time interpolation on a non-TimeSeries
# Only raises ValueError if there are NaNs.
non_ts = self.series.copy()
non_ts[0] = np.NaN
self.assertRaises(ValueError, non_ts.interpolate, method='time')
def test_interp_regression(self):
tm._skip_if_no_scipy()
_skip_if_no_pchip()
ser = Series(np.sort(np.random.uniform(size=100)))
# interpolate at new_index
new_index = ser.index.union(Index([49.25, 49.5, 49.75, 50.25, 50.5,
50.75]))
interp_s = ser.reindex(new_index).interpolate(method='pchip')
# does not blow up, GH5977
interp_s[49:51]
def test_interpolate_corners(self):
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(), s)
tm._skip_if_no_scipy()
s = Series([np.nan, np.nan])
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
s = Series([]).interpolate()
assert_series_equal(s.interpolate(method='polynomial', order=1), s)
def test_interpolate_index_values(self):
s = Series(np.nan, index=np.sort(np.random.rand(30)))
s[::3] = np.random.randn(10)
vals = s.index.values.astype(float)
result = s.interpolate(method='index')
expected = s.copy()
bad = isnull(expected.values)
good = ~bad
expected = Series(np.interp(vals[bad], vals[good],
s.values[good]),
index=s.index[bad])
assert_series_equal(result[bad], expected)
# 'values' is synonymous with 'index' for the method kwarg
other_result = s.interpolate(method='values')
assert_series_equal(other_result, result)
assert_series_equal(other_result[bad], expected)
def test_interpolate_non_ts(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
with tm.assertRaises(ValueError):
s.interpolate(method='time')
# New interpolation tests
def test_nan_interpolate(self):
s = Series([0, 1, np.nan, 3])
result = s.interpolate()
expected = Series([0., 1., 2., 3.])
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, expected)
def test_nan_irregular_index(self):
s = Series([1, 2, np.nan, 4], index=[1, 3, 5, 9])
result = s.interpolate()
expected = Series([1., 2., 3., 4.], index=[1, 3, 5, 9])
assert_series_equal(result, expected)
def test_nan_str_index(self):
s = Series([0, 1, 2, np.nan], index=list('abcd'))
result = s.interpolate()
expected = Series([0., 1., 2., 2.], index=list('abcd'))
assert_series_equal(result, expected)
def test_interp_quad(self):
tm._skip_if_no_scipy()
sq = Series([1, 4, np.nan, 16], index=[1, 2, 3, 4])
result = sq.interpolate(method='quadratic')
expected = Series([1., 4., 9., 16.], index=[1, 2, 3, 4])
assert_series_equal(result, expected)
def test_interp_scipy_basic(self):
tm._skip_if_no_scipy()
s = Series([1, 3, np.nan, 12, np.nan, 25])
# slinear
expected = Series([1., 3., 7.5, 12., 18.5, 25.])
result = s.interpolate(method='slinear')
assert_series_equal(result, expected)
result = s.interpolate(method='slinear', downcast='infer')
assert_series_equal(result, expected)
# nearest
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='nearest')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='nearest', downcast='infer')
assert_series_equal(result, expected)
# zero
expected = Series([1, 3, 3, 12, 12, 25])
result = s.interpolate(method='zero')
assert_series_equal(result, expected.astype('float'))
result = s.interpolate(method='zero', downcast='infer')
assert_series_equal(result, expected)
# quadratic
expected = Series([1, 3., 6.769231, 12., 18.230769, 25.])
result = s.interpolate(method='quadratic')
assert_series_equal(result, expected)
result = s.interpolate(method='quadratic', downcast='infer')
assert_series_equal(result, expected)
# cubic
expected = Series([1., 3., 6.8, 12., 18.2, 25.])
result = s.interpolate(method='cubic')
assert_series_equal(result, expected)
def test_interp_limit(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2)
assert_series_equal(result, expected)
def test_interp_limit_forward(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
# Provide 'forward' (the default) explicitly here.
expected = Series([1., 3., 5., 7., np.nan, 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='forward')
assert_series_equal(result, expected)
result = s.interpolate(method='linear', limit=2,
limit_direction='FORWARD')
assert_series_equal(result, expected)
def test_interp_limit_bad_direction(self):
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
self.assertRaises(ValueError, s.interpolate, method='linear', limit=2,
limit_direction='abc')
# raises an error even if no limit is specified.
self.assertRaises(ValueError, s.interpolate, method='linear',
limit_direction='abc')
def test_interp_limit_direction(self):
# These tests are for issue #9218 -- fill NaNs in both directions.
s = Series([1, 3, np.nan, np.nan, np.nan, 11])
expected = Series([1., 3., np.nan, 7., 9., 11.])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([1., 3., 5., np.nan, 9., 11.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
# Check that this works on a longer series of nans.
s = Series([1, 3, np.nan, np.nan, np.nan, 7, 9, np.nan, np.nan, 12,
np.nan])
expected = Series([1., 3., 4., 5., 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
expected = Series([1., 3., 4., np.nan, 6., 7., 9., 10., 11., 12., 12.])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_to_ends(self):
        # These tests are for issue #10420 -- flow back to beginning.
s = Series([np.nan, np.nan, 5, 7, 9, np.nan])
expected = Series([5., 5., 5., 7., 9., np.nan])
result = s.interpolate(method='linear', limit=2,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([5., 5., 5., 7., 9., 9.])
result = s.interpolate(method='linear', limit=2,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_limit_before_ends(self):
        # These tests are for issue #11115 -- limit ends properly.
s = Series([np.nan, np.nan, 5, 7, np.nan, np.nan])
expected = Series([np.nan, np.nan, 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='forward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., np.nan, np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='backward')
assert_series_equal(result, expected)
expected = Series([np.nan, 5., 5., 7., 7., np.nan])
result = s.interpolate(method='linear', limit=1,
limit_direction='both')
assert_series_equal(result, expected)
def test_interp_all_good(self):
# scipy
tm._skip_if_no_scipy()
s = Series([1, 2, 3])
result = s.interpolate(method='polynomial', order=1)
assert_series_equal(result, s)
# non-scipy
result = s.interpolate()
assert_series_equal(result, s)
def test_interp_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s = Series([1, 2, np.nan], index=idx)
expected = s.copy()
expected.loc[2] = 2
result = s.interpolate()
assert_series_equal(result, expected)
tm._skip_if_no_scipy()
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial', order=1)
def test_interp_nonmono_raise(self):
tm._skip_if_no_scipy()
s = Series([1, np.nan, 3], index=[0, 2, 1])
with tm.assertRaises(ValueError):
s.interpolate(method='krogh')
def test_interp_datetime64(self):
tm._skip_if_no_scipy()
df = Series([1, np.nan, 3], index=date_range('1/1/2000', periods=3))
result = df.interpolate(method='nearest')
expected = Series([1., 1., 3.],
index=date_range('1/1/2000', periods=3))
assert_series_equal(result, expected)
def test_interp_limit_no_nans(self):
# GH 7173
s = pd.Series([1., 2., 3.])
result = s.interpolate(limit=1)
expected = s
assert_series_equal(result, expected)
def test_describe(self):
self.series.describe()
self.ts.describe()
def test_describe_objects(self):
s = Series(['a', 'b', 'b', np.nan, np.nan, np.nan, 'c', 'd', 'a', 'a'])
result = s.describe()
expected = Series({'count': 7, 'unique': 4,
'top': 'a', 'freq': 3}, index=result.index)
assert_series_equal(result, expected)
dt = list(self.ts.index)
dt.append(dt[0])
ser = Series(dt)
rs = ser.describe()
min_date = min(dt)
max_date = max(dt)
xp = Series({'count': len(dt),
'unique': len(self.ts.index),
'first': min_date, 'last': max_date, 'freq': 2,
'top': min_date}, index=rs.index)
assert_series_equal(rs, xp)
def test_describe_empty(self):
result = pd.Series().describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
nanSeries = Series([np.nan])
nanSeries.name = 'NaN'
result = nanSeries.describe()
self.assertEqual(result['count'], 0)
self.assertTrue(result.drop('count').isnull().all())
def test_describe_none(self):
noneSeries = Series([None])
noneSeries.name = 'None'
expected = Series([0, 0], index=['count', 'unique'], name='None')
assert_series_equal(noneSeries.describe(), expected)
class TestDataFrame(tm.TestCase, Generic):
_typ = DataFrame
_comparator = lambda self, x, y: assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame([
11, 21, 31
], index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]))
df.rename(str.lower)
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
self.assertTrue(df.bool())
df = DataFrame([[False]])
self.assertFalse(df.bool())
df = DataFrame([[False, False]])
self.assertRaises(ValueError, lambda: df.bool())
self.assertRaises(ValueError, lambda: bool(df))
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({'A': [1, '2', 3.]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with tm.assertRaises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with tm.assertRaises(NotImplementedError):
df.interpolate(method='values')
def test_interp_various(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
def test_interp_alt_scipy(self):
tm._skip_if_no_scipy()
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.ix[2, 'A'] = 3
expected.ix[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.ix[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= '0.17.0':
expected.ix[5, 'A'] = 6.0
else:
expected.ix[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
# scipy route
tm._skip_if_no_scipy()
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
def test_interp_leading_nans(self):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
tm._skip_if_no_scipy()
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with tm.assertRaises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
        expected = DataFrame({'A': np.array([1, 2, 3, 4], dtype='float64'),
                              'B': np.array([1, 2, 3, 4], dtype='int64'),
                              'C': np.array([1., 2., 3, 4.], dtype='float64'),
                              'D': np.array([1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
def test_describe(self):
tm.makeDataFrame().describe()
tm.makeMixedDataFrame().describe()
tm.makeTimeDataFrame().describe()
def test_describe_percentiles_percent_or_raw(self):
msg = 'percentiles should all be in the interval \\[0, 1\\]'
df = tm.makeDataFrame()
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[10, 50, 100])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[2])
with tm.assertRaisesRegexp(ValueError, msg):
df.describe(percentiles=[-2])
def test_describe_percentiles_equivalence(self):
df = tm.makeDataFrame()
d1 = df.describe()
d2 = df.describe(percentiles=[.25, .75])
assert_frame_equal(d1, d2)
def test_describe_percentiles_insert_median(self):
df = tm.makeDataFrame()
d1 = df.describe(percentiles=[.25, .75])
d2 = df.describe(percentiles=[.25, .5, .75])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('75%' in d2.index)
# none above
d1 = df.describe(percentiles=[.25, .45])
d2 = df.describe(percentiles=[.25, .45, .5])
assert_frame_equal(d1, d2)
self.assertTrue('25%' in d1.index)
self.assertTrue('45%' in d2.index)
# none below
d1 = df.describe(percentiles=[.75, 1])
d2 = df.describe(percentiles=[.5, .75, 1])
assert_frame_equal(d1, d2)
self.assertTrue('75%' in d1.index)
self.assertTrue('100%' in d2.index)
# edge
d1 = df.describe(percentiles=[0, 1])
d2 = df.describe(percentiles=[0, .5, 1])
assert_frame_equal(d1, d2)
self.assertTrue('0%' in d1.index)
self.assertTrue('100%' in d2.index)
def test_describe_no_numeric(self):
df = DataFrame({'A': ['foo', 'foo', 'bar'] * 8,
'B': ['a', 'b', 'c', 'd'] * 6})
desc = df.describe()
expected = DataFrame(dict((k, v.describe())
for k, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(desc, expected)
ts = tm.makeTimeSeries()
df = DataFrame({'time': ts.index})
desc = df.describe()
self.assertEqual(desc.time['first'], min(ts.index))
def test_describe_empty_int_columns(self):
df = DataFrame([[0, 1], [1, 2]])
desc = df[df[0] < 0].describe() # works
assert_series_equal(desc.xs('count'),
Series([0, 0], dtype=float, name='count'))
self.assertTrue(isnull(desc.ix[1:]).all().all())
def test_describe_objects(self):
df = DataFrame({"C1": ['a', 'a', 'c'], "C2": ['d', 'd', 'f']})
result = df.describe()
expected = DataFrame({"C1": [3, 2, 'a', 2], "C2": [3, 2, 'd', 2]},
index=['count', 'unique', 'top', 'freq'])
assert_frame_equal(result, expected)
df = DataFrame({"C1": pd.date_range('2010-01-01', periods=4, freq='D')
})
df.loc[4] = pd.Timestamp('2010-01-04')
result = df.describe()
expected = DataFrame({"C1": [5, 4, pd.Timestamp('2010-01-04'), 2,
pd.Timestamp('2010-01-01'),
pd.Timestamp('2010-01-04')]},
index=['count', 'unique', 'top', 'freq',
'first', 'last'])
assert_frame_equal(result, expected)
# mix time and str
df['C2'] = ['a', 'a', 'b', 'c', 'a']
result = df.describe()
expected['C2'] = [5, 3, 'a', 3, np.nan, np.nan]
assert_frame_equal(result, expected)
# just str
expected = DataFrame({'C2': [5, 3, 'a', 4]},
index=['count', 'unique', 'top', 'freq'])
result = df[['C2']].describe()
# mix of time, str, numeric
df['C3'] = [2, 4, 6, 8, 2]
result = df.describe()
expected = DataFrame({"C3": [5., 4.4, 2.607681, 2., 2., 4., 6., 8.]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
assert_frame_equal(result, expected)
assert_frame_equal(df.describe(), df[['C3']].describe())
assert_frame_equal(df[['C1', 'C3']].describe(), df[['C3']].describe())
assert_frame_equal(df[['C2', 'C3']].describe(), df[['C3']].describe())
def test_describe_typefiltering(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24, dtype='int64'),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
descN = df.describe()
expected_cols = ['numC', 'numD', ]
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descN, expected)
desc = df.describe(include=['number'])
assert_frame_equal(desc, descN)
desc = df.describe(exclude=['object', 'datetime'])
assert_frame_equal(desc, descN)
desc = df.describe(include=['float'])
assert_frame_equal(desc, descN.drop('numC', 1))
descC = df.describe(include=['O'])
expected_cols = ['catA', 'catB']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(descC, expected)
descD = df.describe(include=['datetime'])
assert_series_equal(descD.ts, df.ts.describe())
desc = df.describe(include=['object', 'number', 'datetime'])
assert_frame_equal(desc.loc[:, ["numC", "numD"]].dropna(), descN)
assert_frame_equal(desc.loc[:, ["catA", "catB"]].dropna(), descC)
descDs = descD.sort_index() # the index order change for mixed-types
assert_frame_equal(desc.loc[:, "ts":].dropna().sort_index(), descDs)
desc = df.loc[:, 'catA':'catB'].describe(include='all')
assert_frame_equal(desc, descC)
desc = df.loc[:, 'numC':'numD'].describe(include='all')
assert_frame_equal(desc, descN)
desc = df.describe(percentiles=[], include='all')
cnt = Series(data=[4, 4, 6, 6, 6],
index=['catA', 'catB', 'numC', 'numD', 'ts'])
assert_series_equal(desc.count(), cnt)
self.assertTrue('count' in desc.index)
self.assertTrue('unique' in desc.index)
self.assertTrue('50%' in desc.index)
self.assertTrue('first' in desc.index)
desc = df.drop("ts", 1).describe(percentiles=[], include='all')
assert_series_equal(desc.count(), cnt.drop("ts"))
self.assertTrue('first' not in desc.index)
desc = df.drop(["numC", "numD"], 1).describe(percentiles=[],
include='all')
assert_series_equal(desc.count(), cnt.drop(["numC", "numD"]))
self.assertTrue('50%' not in desc.index)
def test_describe_typefiltering_category_bool(self):
df = DataFrame({'A_cat': pd.Categorical(['foo', 'foo', 'bar'] * 8),
'B_str': ['a', 'b', 'c', 'd'] * 6,
'C_bool': [True] * 12 + [False] * 12,
'D_num': np.arange(24.) + .5,
'E_ts': tm.makeTimeSeries()[:24].index})
# bool is considered numeric in describe, although not an np.number
desc = df.describe()
expected_cols = ['C_bool', 'D_num']
expected = DataFrame(dict((k, df[k].describe())
for k in expected_cols),
columns=expected_cols)
assert_frame_equal(desc, expected)
desc = df.describe(include=["category"])
self.assertTrue(desc.columns.tolist() == ["A_cat"])
# 'all' includes numpy-dtypes + category
desc1 = df.describe(include="all")
desc2 = df.describe(include=[np.generic, "category"])
assert_frame_equal(desc1, desc2)
def test_describe_timedelta(self):
df = DataFrame({"td": pd.to_timedelta(np.arange(24) % 20, "D")})
self.assertTrue(df.describe().loc["mean"][0] == pd.to_timedelta(
"8d4h"))
def test_describe_typefiltering_dupcol(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
s = df.describe(include='all').shape[1]
df = pd.concat([df, df], axis=1)
s2 = df.describe(include='all').shape[1]
self.assertTrue(s2 == 2 * s)
def test_describe_typefiltering_groupby(self):
df = DataFrame({'catA': ['foo', 'foo', 'bar'] * 8,
'catB': ['a', 'b', 'c', 'd'] * 6,
'numC': np.arange(24),
'numD': np.arange(24.) + .5,
'ts': tm.makeTimeSeries()[:24].index})
G = df.groupby('catA')
self.assertTrue(G.describe(include=['number']).shape == (16, 2))
        self.assertTrue(G.describe(include=['number', 'object']).shape == (22, 3))
self.assertTrue(G.describe(include='all').shape == (26, 4))
def test_describe_multi_index_df_column_names(self):
""" Test that column names persist after the describe operation."""
df = pd.DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
# GH 11517
# test for hierarchical index
hierarchical_index_df = df.groupby(['A', 'B']).mean().T
self.assertTrue(hierarchical_index_df.columns.names == ['A', 'B'])
self.assertTrue(hierarchical_index_df.describe().columns.names ==
['A', 'B'])
# test for non-hierarchical index
non_hierarchical_index_df = df.groupby(['A']).mean().T
self.assertTrue(non_hierarchical_index_df.columns.names == ['A'])
self.assertTrue(non_hierarchical_index_df.describe().columns.names ==
['A'])
def test_no_order(self):
tm._skip_if_no_scipy()
s = Series([0, 1, np.nan, 3])
with tm.assertRaises(ValueError):
s.interpolate(method='polynomial')
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
def test_spline(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5, np.nan, 7])
result = s.interpolate(method='spline', order=1)
expected = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result, expected)
def test_spline_extrapolate(self):
tm.skip_if_no_package(
'scipy', '0.15',
'setting ext on scipy.interpolate.UnivariateSpline')
s = Series([1, 2, 3, 4, np.nan, 6, np.nan])
result3 = s.interpolate(method='spline', order=1, ext=3)
expected3 = Series([1., 2., 3., 4., 5., 6., 6.])
assert_series_equal(result3, expected3)
result1 = s.interpolate(method='spline', order=1, ext=0)
expected1 = Series([1., 2., 3., 4., 5., 6., 7.])
assert_series_equal(result1, expected1)
def test_spline_smooth(self):
tm._skip_if_no_scipy()
s = Series([1, 2, np.nan, 4, 5.1, np.nan, 7])
self.assertNotEqual(s.interpolate(method='spline', order=3, s=0)[5],
s.interpolate(method='spline', order=3)[5])
def test_spline_interpolation(self):
tm._skip_if_no_scipy()
s = Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
result1 = s.interpolate(method='spline', order=1)
expected1 = s.interpolate(method='spline', order=1)
assert_series_equal(result1, expected1)
# GH #10633
def test_spline_error(self):
tm._skip_if_no_scipy()
s = pd.Series(np.arange(10) ** 2)
s[np.random.randint(0, 9, 3)] = np.nan
with tm.assertRaises(ValueError):
s.interpolate(method='spline')
with tm.assertRaises(ValueError):
s.interpolate(method='spline', order=0)
def test_metadata_propagation_indiv(self):
# groupby
df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
result = df.groupby('A').sum()
self.check_metadata(df, result)
# resample
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20130101', periods=1000, freq='s'))
result = df.resample('1T')
self.check_metadata(df, result)
# merging with override
# GH 6923
_metadata = DataFrame._metadata
_finalize = DataFrame.__finalize__
np.random.seed(10)
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['a', 'b'])
df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=['c', 'd'])
DataFrame._metadata = ['filename']
df1.filename = 'fname1.csv'
df2.filename = 'fname2.csv'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'merge':
left, right = other.left, other.right
value = getattr(left, name, '') + '|' + getattr(right,
name, '')
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, ''))
return self
DataFrame.__finalize__ = finalize
result = df1.merge(df2, left_on=['a'], right_on=['c'], how='inner')
self.assertEqual(result.filename, 'fname1.csv|fname2.csv')
# concat
# GH 6927
DataFrame._metadata = ['filename']
df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list('ab'))
df1.filename = 'foo'
def finalize(self, other, method=None, **kwargs):
for name in self._metadata:
if method == 'concat':
value = '+'.join([getattr(
o, name) for o in other.objs if getattr(o, name, None)
])
object.__setattr__(self, name, value)
else:
object.__setattr__(self, name, getattr(other, name, None))
return self
DataFrame.__finalize__ = finalize
result = pd.concat([df1, df1])
self.assertEqual(result.filename, 'foo+foo')
# reset
DataFrame._metadata = _metadata
DataFrame.__finalize__ = _finalize
def test_tz_convert_and_localize(self):
l0 = date_range('20140701', periods=5, freq='D')
# TODO: l1 should be a PeriodIndex for testing
# after GH2106 is addressed
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_convert('UTC')
with tm.assertRaises(NotImplementedError):
period_range('20140701', periods=1).tz_localize('UTC')
# l1 = period_range('20140701', periods=5, freq='D')
l1 = date_range('20140701', periods=5, freq='D')
int_idx = Index(range(5))
for fn in ['tz_localize', 'tz_convert']:
if fn == 'tz_convert':
l0 = l0.tz_localize('UTC')
l1 = l1.tz_localize('UTC')
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)('US/Pacific')
l1_expected = getattr(idx, fn)('US/Pacific')
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)('US/Pacific')
self.assertTrue(df1.index.equals(l0_expected))
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)('US/Pacific', level=0)
self.assertFalse(df3.index.levels[0].equals(l0))
self.assertTrue(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1))
self.assertFalse(df3.index.levels[1].equals(l1_expected))
df3 = getattr(df2, fn)('US/Pacific', level=1)
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
df4 = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)('US/Pacific', level=1) # noqa
self.assertTrue(df3.index.levels[0].equals(l0))
self.assertFalse(df3.index.levels[0].equals(l0_expected))
self.assertTrue(df3.index.levels[1].equals(l1_expected))
self.assertFalse(df3.index.levels[1].equals(l1))
# Bad Inputs
for fn in ['tz_localize', 'tz_convert']:
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(index=int_idx)
df = getattr(df, fn)('US/Pacific')
# Not DatetimeIndex / PeriodIndex
with tm.assertRaisesRegexp(TypeError, 'DatetimeIndex'):
df = DataFrame(np.ones(5),
MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)('US/Pacific', level=0)
# Invalid level
with tm.assertRaisesRegexp(ValueError, 'not valid'):
df = DataFrame(index=l0)
df = getattr(df, fn)('US/Pacific', level=1)
def test_set_attribute(self):
# Test for consistent setattr behavior when an attribute and a column
# have the same name (Issue #8994)
df = DataFrame({'x': [1, 2, 3]})
df.y = 2
df['y'] = [2, 4, 6]
df.y = 5
assert_equal(df.y, 5)
assert_series_equal(df['y'], Series([2, 4, 6], name='y'))
def test_pct_change(self):
# GH 11150
pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10), np.arange(
0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
mask = pnl.isnull()
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
expected[mask] = np.nan
result = pnl.pct_change(axis=axis, fill_method='pad')
self.assert_frame_equal(result, expected)
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
class TestNDFrame(tm.TestCase):
# tests that don't fit elsewhere
def test_squeeze(self):
# noop
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries()]:
tm.assert_series_equal(s.squeeze(), s)
for df in [tm.makeTimeDataFrame()]:
tm.assert_frame_equal(df.squeeze(), df)
for p in [tm.makePanel()]:
tm.assert_panel_equal(p.squeeze(), p)
for p4d in [tm.makePanel4D()]:
tm.assert_panel4d_equal(p4d.squeeze(), p4d)
# squeezing
df = tm.makeTimeDataFrame().reindex(columns=['A'])
tm.assert_series_equal(df.squeeze(), df['A'])
p = tm.makePanel().reindex(items=['ItemA'])
tm.assert_frame_equal(p.squeeze(), p['ItemA'])
p = tm.makePanel().reindex(items=['ItemA'], minor_axis=['A'])
tm.assert_series_equal(p.squeeze(), p.ix['ItemA', :, 'A'])
p4d = tm.makePanel4D().reindex(labels=['label1'])
tm.assert_panel_equal(p4d.squeeze(), p4d['label1'])
p4d = tm.makePanel4D().reindex(labels=['label1'], items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(), p4d.ix['label1', 'ItemA'])
# don't fail with 0 length dimensions GH11229 & GH8999
empty_series = pd.Series([], name='five')
empty_frame = pd.DataFrame([empty_series])
empty_panel = pd.Panel({'six': empty_frame})
[tm.assert_series_equal(empty_series, higher_dim.squeeze())
for higher_dim in [empty_series, empty_frame, empty_panel]]
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s1[1] = 99
self.assertFalse(s1.equals(s2))
# NaNs compare as equal
s1 = pd.Series([1, np.nan, 3, np.nan], index=[0, 2, 1, 3])
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
s2[0] = 9.9
self.assertFalse(s1.equals(s2))
idx = MultiIndex.from_tuples([(0, 'a'), (1, 'b'), (2, 'c')])
s1 = Series([1, 2, np.nan], index=idx)
s2 = s1.copy()
self.assertTrue(s1.equals(s2))
# Add object dtype column with nans
index = np.random.random(10)
df1 = DataFrame(
np.random.random(10, ), index=index, columns=['floats'])
        df1['text'] = 'the sky is so blue. we could use more chocolate.'.split()
df1['start'] = date_range('2000-1-1', periods=10, freq='T')
df1['end'] = date_range('2000-1-1', periods=10, freq='D')
df1['diff'] = df1['end'] - df1['start']
df1['bool'] = (np.arange(10) % 3 == 0)
df1.ix[::2] = nan
df2 = df1.copy()
self.assertTrue(df1['text'].equals(df2['text']))
self.assertTrue(df1['start'].equals(df2['start']))
self.assertTrue(df1['end'].equals(df2['end']))
self.assertTrue(df1['diff'].equals(df2['diff']))
self.assertTrue(df1['bool'].equals(df2['bool']))
self.assertTrue(df1.equals(df2))
self.assertFalse(df1.equals(object))
# different dtype
different = df1.copy()
different['floats'] = different['floats'].astype('float32')
self.assertFalse(df1.equals(different))
# different index
different_index = -index
different = df2.set_index(different_index)
self.assertFalse(df1.equals(different))
# different columns
different = df2.copy()
different.columns = df2.columns[::-1]
self.assertFalse(df1.equals(different))
# DatetimeIndex
index = pd.date_range('2000-1-1', periods=10, freq='T')
df1 = df1.set_index(index)
df2 = df1.copy()
self.assertTrue(df1.equals(df2))
# MultiIndex
df3 = df1.set_index(['text'], append=True)
df2 = df1.set_index(['text'], append=True)
self.assertTrue(df3.equals(df2))
df2 = df1.set_index(['floats'], append=True)
self.assertFalse(df3.equals(df2))
# NaN in index
df3 = df1.set_index(['floats'], append=True)
df2 = df1.set_index(['floats'], append=True)
self.assertTrue(df3.equals(df2))
# GH 8437
a = pd.Series([False, np.nan])
b = pd.Series([False, np.nan])
c = pd.Series(index=range(2))
d = pd.Series(index=range(2))
e = pd.Series(index=range(2))
f = pd.Series(index=range(2))
c[:-1] = d[:-1] = e[0] = f[0] = False
self.assertTrue(a.equals(a))
self.assertTrue(a.equals(b))
self.assertTrue(a.equals(c))
self.assertTrue(a.equals(d))
self.assertFalse(a.equals(e))
self.assertTrue(e.equals(f))
def test_describe_raises(self):
with tm.assertRaises(NotImplementedError):
tm.makePanel().describe()
def test_pipe(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: x ** y
result = df.pipe(f, 2)
expected = DataFrame({'A': [1, 4, 9]})
self.assert_frame_equal(result, expected)
result = df.A.pipe(f, 2)
self.assert_series_equal(result, expected.A)
def test_pipe_tuple(self):
df = DataFrame({'A': [1, 2, 3]})
f = lambda x, y: y
result = df.pipe((f, 'y'), 0)
self.assert_frame_equal(result, df)
result = df.A.pipe((f, 'y'), 0)
self.assert_series_equal(result, df.A)
def test_pipe_tuple_error(self):
df = | DataFrame({"A": [1, 2, 3]}) | pandas.DataFrame |
# standard modules
import os
import shutil
import argparse
# aliased standard modules
import pandas as pd
# modules of sanity checker
import lib.paths as paths
import lib.utils as utils
import lib.logger_config as logger_config
# standalone imports
from lib.logger_config import log
from lib.test_config import get_config_of_current_test
from lib.color import Style
'''
Module providing the functionality to add an experiment
to the reference pool. It contains:
- add_line_descr_f: Add a new line to the experiment description file
with all information about an experiment
- main: asks user for additional information about experiment, commits
data of new experiment to git-repository
Help: python add_exp_tp_ref.py --help
C.Siegenthaler 07.2020 (C2SM)
J.Jucker 01.2021 (C2SM)
'''
def add_line_descr_f(exp,f_exp_descr):
'''
    Add a line for experiment exp in the file f_exp_descr
    :param exp: name of the new experiment
:param f_exp_descr: file in which the new line has to be added
return: None
'''
    log.info('Adding line {} to the file {}:'.format(exp, f_exp_descr))
# open file in dataframe
if not os.path.isfile(f_exp_descr):
# create dataframe
cols_exp_descr_f = ['Experiment name',
'Platform',
'OS',
'Compiler (with version)',
'Optimisation level (-OX)',
'-fast-transcendentals (y/n)',
'-no-prec-sqrt (y/n)',
'-no-prec-div (y/n)',
'welch (y/n)',
'fldcor (y/n)',
'rmse (y/n)',
'emi (y/n)',
'Date of experiment (month yyyy)']
df_exp_descr = pd.DataFrame(columns=cols_exp_descr_f)
else:
df_exp_descr = | pd.read_csv(f_exp_descr, sep=';') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 6 08:51:19 2019
@author: dipesh
"""
# Import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.callbacks import EarlyStopping, ModelCheckpoint, History
# Load dataset
df = | pd.read_csv('bank-additional-full.csv') | pandas.read_csv |
import sqlite3
import sqlalchemy as sa
import pandas as pd
from powergenome.params import DATA_PATHS
from powergenome.util import init_pudl_connection
GENS860_COLS = [
"report_date",
"plant_id_eia",
"generator_id",
# "associated_combined_heat_power",
# "balancing_authority_code_eia",
# "bypass_heat_recovery",
"capacity_mw",
# "county",
"current_planned_operating_date",
"energy_source_code_1",
# "ferc_cogen_status",
# "iso_rto_code",
# "latitude",
# "longitude",
"minimum_load_mw",
# "operating_date",
"operational_status_code",
# "original_planned_operating_date",
# "state",
"summer_capacity_mw",
"technology_description",
# "unit_id_pudl",
"winter_capacity_mw",
"fuel_type_code_pudl",
# "zip_code",
"planned_retirement_date",
"time_cold_shutdown_full_load_code",
"switch_oil_gas",
"planned_new_capacity_mw",
"energy_source_code_2",
"region",
]
GEN_FUEL_COLS = [
"report_date",
"plant_id_eia",
"energy_source_code",
"fuel_consumed_for_electricity_mmbtu",
"fuel_consumed_for_electricity_units",
"fuel_consumed_mmbtu",
"fuel_consumed_units",
"fuel_mmbtu_per_unit",
"net_generation_mwh",
"prime_mover_code",
"fuel_type_code_pudl",
]
ENTITY_COLS = ["plant_id_eia", "generator_id", "prime_mover_code", "operating_date"]
def create_testing_db():
pudl_engine, pudl_out, pg_engine = init_pudl_connection(
start_year=2018, end_year=2020
)
pudl_test_conn = sqlite3.connect(DATA_PATHS["test_data"] / "pudl_test_data.db")
plant_region = pd.read_sql_table("plant_region_map_epaipm", pg_engine)
# gens_860 = pudl_out.gens_eia860()
s = "SELECT * from generators_eia860 where strftime('%Y',report_date)='2020'"
gens_860 = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
# gens_860 = gens_860.loc[gens_860.report_date.dt.year == 2020, :]
gens_860 = pd.merge(gens_860, plant_region, on="plant_id_eia", how="inner")
gens_860 = gens_860.loc[:, GENS860_COLS]
gens_860 = gens_860.groupby(
["region", "technology_description"], as_index=False
).head(10)
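    # keep only the first 10 generators per (region, technology) pair so the
    # testing database stays small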
gens_860 = gens_860.drop(columns="region")
eia_plant_ids = gens_860["plant_id_eia"].unique()
gen_entity = pd.read_sql_table("generators_entity_eia", pudl_engine)
gen_entity = gen_entity.loc[
gen_entity["plant_id_eia"].isin(eia_plant_ids), ENTITY_COLS
]
bga = pudl_out.bga_eia860()
bga = bga.loc[
(bga.report_date.dt.year == 2020) & (bga.plant_id_eia.isin(eia_plant_ids)), :
]
s = "SELECT * from generation_fuel_eia923 where strftime('%Y',report_date)='2020'"
gen_fuel = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
gen_fuel = gen_fuel.loc[
gen_fuel.plant_id_eia.isin(eia_plant_ids),
GEN_FUEL_COLS,
]
s = "SELECT * from generation_fuel_nuclear_eia923 where strftime('%Y',report_date)='2020'"
gen_fuel_nuc = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
gen_fuel_nuc = gen_fuel_nuc.loc[
gen_fuel_nuc.plant_id_eia.isin(eia_plant_ids),
GEN_FUEL_COLS,
]
# gen_fuel = pd.concat([gen_fuel, gen_fuel_nuc], ignore_index=True)
s = "SELECT * from generation_eia923 where strftime('%Y',report_date)='2020'"
gen_923 = pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"])
gen_923 = gen_923.loc[
gen_923.plant_id_eia.isin(eia_plant_ids),
:,
]
s = "SELECT * from boiler_fuel_eia923 where strftime('%Y',report_date)='2020'"
boiler_fuel = | pd.read_sql_query(s, pudl_engine, parse_dates=["report_date"]) | pandas.read_sql_query |
"""
Created on Wed May 12 15:36:53 2021
@author: Lenovo
"""
import cv2
import numpy as np
import os
from csv import writer
import pandas as pd
def register():
t=1
lst=[]
directory = r'singleShot'
while(t):
print("Enter 1 to register a new student / 2 to remove an existing student/ 3 to exit: ")
Course_list={1:"B.Tech",2:"B.Agriculture",3:"B.Pharma",4:"B.Com",5:"B.A",6:"BCA",7:"B.Sc",8:"M.Tech",9:"M.A",10:"M.Com",11:"MBA",12:"MCA",13:"Others"}
Choice=int(input())
if(Choice==1):
Name=input("Enter the name of the student: ")
lst.append(Name)
Roll=int(input("Enter the University Roll No of the student: "))
lst.append(Roll)
E_mail= input("Enter the E-mail of the student: ")
lst.append(E_mail)
Phone=int(input("Enter the phone number of the student :"))
lst.append(Phone)
Tel_id=int(input("Enter the telegram id of the student :"))
lst.append(Tel_id)
print(Course_list)
Course=int(input("Input your Course no: "))
Crse=Course_list[Course]
lst.append(Crse)
Semester=input("Enter the Semester of the Student: ")
lst.append(Semester)
Section=input("Enter the Section of the Student: ")
lst.append(Section)
lst.append(0)
data=pd.read_csv("Details.csv")
a_series = pd.Series(lst, index = data.columns)
data=data.append(a_series,ignore_index=True)
data.to_csv("Details.csv",index=False)
print("Data written successfully!")
lst=[]
print("Look into the camera for your picture: ")
k=1
while(k):
cam = cv2.VideoCapture(0)
ret,frame=cam.read()
print(ret)
cam.release()
cv2.imshow("my image", frame)
cv2.waitKey(10000)
cv2.destroyAllWindows()
k=int(input("Enter 1 to retske the image/ 0 if the image is fine: "))
filename = 'singleShot\\'+str(Name)+'_'+str(Roll)+".jpg"
cv2.imwrite(filename, frame)
print("Resgistration Successful")
elif(Choice==2):
data= | pd.read_csv("Details.csv") | pandas.read_csv |
import pandas as pd
from ..utils import constants, plot, utils
import numpy as np
from warnings import warn
from shapely.geometry import Polygon, Point
import geopandas as gpd
from .flowdataframe import FlowDataFrame
from skmob.preprocessing import routing
class TrajSeries(pd.Series):
@property
def _constructor(self):
return TrajSeries
@property
def _constructor_expanddim(self):
return TrajDataFrame
class TrajDataFrame(pd.DataFrame):
"""TrajDataFrame.
A TrajDataFrame object is a pandas.DataFrame that has three columns latitude, longitude and datetime. TrajDataFrame accepts the following keyword arguments:
Parameters
----------
data : list or dict or pandas DataFrame
the data that must be embedded into a TrajDataFrame.
latitude : int or str, optional
the position or the name of the column in `data` containing the latitude. The default is `constants.LATITUDE`.
longitude : int or str, optional
the position or the name of the column in `data` containing the longitude. The default is `constants.LONGITUDE`.
datetime : int or str, optional
the position or the name of the column in `data` containing the datetime. The default is `constants.DATETIME`.
user_id : int or str, optional
the position or the name of the column in `data`containing the user identifier. The default is `constants.UID`.
trajectory_id : int or str, optional
the position or the name of the column in `data` containing the trajectory identifier. The default is `constants.TID`.
timestamp : boolean, optional
it True, the datetime is a timestamp. The default is `False`.
crs : dict, optional
the coordinate reference system of the geographic points. The default is `{"init": "epsg:4326"}`.
parameters : dict, optional
parameters to add to the TrajDataFrame. The default is `{}` (no parameters).
Examples
--------
>>> import skmob
>>> # create a TrajDataFrame from a list
>>> data_list = [[1, 39.984094, 116.319236, '2008-10-23 13:53:05'], [1, 39.984198, 116.319322, '2008-10-23 13:53:06'], [1, 39.984224, 116.319402, '2008-10-23 13:53:11'], [1, 39.984211, 116.319389, '2008-10-23 13:53:16']]
>>> tdf = skmob.TrajDataFrame(data_list, latitude=1, longitude=2, datetime=3)
>>> print(tdf.head())
0 lat lng datetime
0 1 39.984094 116.319236 2008-10-23 13:53:05
1 1 39.984198 116.319322 2008-10-23 13:53:06
2 1 39.984224 116.319402 2008-10-23 13:53:11
3 1 39.984211 116.319389 2008-10-23 13:53:16
>>> print(type(tdf))
<class 'skmob.core.trajectorydataframe.TrajDataFrame'>
>>>
>>> # create a TrajDataFrame from a pandas DataFrame
>>> import pandas as pd
>>> # create a DataFrame from the previous list
>>> data_df = pd.DataFrame(data_list, columns=['user', 'latitude', 'lng', 'hour'])
>>> print(type(data_df))
<class 'pandas.core.frame.DataFrame'>
>>> tdf = skmob.TrajDataFrame(data_df, latitude='latitude', datetime='hour', user_id='user')
>>> print(type(tdf))
<class 'skmob.core.trajectorydataframe.TrajDataFrame'>
>>> print(tdf.head())
uid lat lng datetime
0 1 39.984094 116.319236 2008-10-23 13:53:05
1 1 39.984198 116.319322 2008-10-23 13:53:06
2 1 39.984224 116.319402 2008-10-23 13:53:11
3 1 39.984211 116.319389 2008-10-23 13:53:16
"""
_metadata = ['_parameters', '_crs'] # All the metadata that should be accessible must be also in the metadata method
def __init__(self, data, latitude=constants.LATITUDE, longitude=constants.LONGITUDE, datetime=constants.DATETIME,
user_id=constants.UID, trajectory_id=constants.TID,
timestamp=False, crs={"init": "epsg:4326"}, parameters={}):
original2default = {latitude: constants.LATITUDE,
longitude: constants.LONGITUDE,
datetime: constants.DATETIME,
user_id: constants.UID,
trajectory_id: constants.TID}
columns = None
if isinstance(data, pd.DataFrame):
tdf = data.rename(columns=original2default)
columns = tdf.columns
# Dictionary
elif isinstance(data, dict):
tdf = pd.DataFrame.from_dict(data).rename(columns=original2default)
columns = tdf.columns
# List
elif isinstance(data, list) or isinstance(data, np.ndarray):
tdf = data
columns = []
num_columns = len(data[0])
for i in range(num_columns):
try:
columns += [original2default[i]]
except KeyError:
columns += [i]
elif isinstance(data, pd.core.internals.BlockManager):
tdf = data
else:
raise TypeError('DataFrame constructor called with incompatible data and dtype: {e}'.format(e=type(data)))
super(TrajDataFrame, self).__init__(tdf, columns=columns)
# Check crs consistency
if crs is None:
warn("crs will be set to the default crs WGS84 (EPSG:4326).")
if not isinstance(crs, dict):
raise TypeError('crs must be a dict type.')
self._crs = crs
if not isinstance(parameters, dict):
raise AttributeError("parameters must be a dictionary.")
self._parameters = parameters
if self._has_traj_columns():
self._set_traj(timestamp=timestamp, inplace=True)
def _has_traj_columns(self):
if (constants.DATETIME in self) and (constants.LATITUDE in self) and (constants.LONGITUDE in self):
return True
return False
def _is_trajdataframe(self):
if ((constants.DATETIME in self) and pd.core.dtypes.common.is_datetime64_any_dtype(self[constants.DATETIME]))\
and ((constants.LONGITUDE in self) and | pd.core.dtypes.common.is_float_dtype(self[constants.LONGITUDE]) | pandas.core.dtypes.common.is_float_dtype |
#coding: utf-8
import struct
from pytdx.reader.base_reader import BaseReader
from collections import OrderedDict
import pandas as pd
import os
from io import BytesIO
"""
Refer to this: http://blog.csdn.net/Metal1/article/details/44352639
"""
BlockReader_TYPE_FLAT = 0
BlockReader_TYPE_GROUP = 1
class BlockReader(BaseReader):
def get_df(self, fname, result_type=BlockReader_TYPE_FLAT):
result = self.get_data(fname, result_type)
return pd.DataFrame(result)
def get_data(self, fname, result_type=BlockReader_TYPE_FLAT):
result = []
if type(fname) is not bytearray:
with open(fname, "rb") as f:
data = f.read()
else:
data = fname
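        # records start at byte offset 384; the leading bytes are skipped as a file header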
pos = 384
(num, ) = struct.unpack("<H", data[pos: pos+2])
pos += 2
for i in range(num):
blockname_raw = data[pos: pos+9]
pos += 9
blockname = blockname_raw.decode("gbk", 'ignore').rstrip("\x00")
stock_count, block_type = struct.unpack("<HH", data[pos: pos+4])
pos += 4
block_stock_begin = pos
codes = []
for code_index in range(stock_count):
one_code = data[pos: pos+7].decode("utf-8", 'ignore').rstrip("\x00")
pos += 7
if result_type == BlockReader_TYPE_FLAT:
result.append(
OrderedDict([
("blockname", blockname),
("block_type", block_type),
("code_index", code_index),
("code", one_code),
])
)
elif result_type == BlockReader_TYPE_GROUP:
codes.append(one_code)
if result_type == BlockReader_TYPE_GROUP:
result.append(
OrderedDict([
("blockname", blockname),
("block_type", block_type),
("stock_count", stock_count),
("code_list", ",".join(codes))
])
)
pos = block_stock_begin + 2800
return result
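# Usage sketch (hedged): the file name below is an assumption -- TDX installations
# typically keep block files such as "block_zs.dat" under vipdoc/T0002/hq_cache.
#
#     reader = BlockReader()
#     block_df = reader.get_df("block_zs.dat", BlockReader_TYPE_GROUP)
#     print(block_df[["blockname", "stock_count"]].head())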
"""
Reads the folder of custom block files backed up from TDX (Tongdaxin). The returned format matches the regular TDX block layout. Tested on the GF (Guangfa) Securities client; other clients are untested.
"""
class CustomerBlockReader(BaseReader):
def get_df(self, fname, result_type=BlockReader_TYPE_FLAT):
result = self.get_data(fname, result_type)
return | pd.DataFrame(result) | pandas.DataFrame |
import pandas as pd
dataset_train=pd.read_csv('../input/train.csv')
dataset_test=pd.read_csv('../input/test.csv')
dataset_train.head()
dataset_test.head()
dataset_train.isnull().values.any()
dataset_test.isnull().values.any()
dataset_train.info()
dataset_test.info()
dataset_train.describe()
dataset_test.describe()
dataset_train.shape
dataset_test.shape
dataset_train.head()
X=dataset_train.iloc[:,1:-1].values
X.shape
y=dataset_train.iloc[:,-1].values
y.shape
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.15,random_state=0)
X_train.shape
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train=sc.fit_transform(X_train)
X_test=sc.transform(X_test)
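# the scaler is fitted on the training split only, so no test-set information
# leaks into the preprocessing step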
from sklearn.ensemble import RandomForestClassifier
rf=RandomForestClassifier(n_estimators=54,random_state=101,min_samples_split=4,criterion='entropy')
rf.fit(X_train,y_train)
y_pred=rf.predict(X_test)
rf.score(X_test,y_test)
dataset_test.head()
X_new=dataset_test.iloc[:,1:].values
X_new.shape
X_new=sc.transform(X_new)
y_new=rf.predict(X_new)
y_new.shape
upload= | pd.DataFrame(y_new,dataset_test['Id']) | pandas.DataFrame |
#
# Adaptation of spontaneous activity 2 in the developing visual cortex
# M. E. Wosniack et al.
#
# Data analysis codes
# Auxiliary functions file: extra_functions.py
#
# Author: <NAME>
# Max Planck Institute for Brain Research
# <EMAIL>
# June 2020
#
import numpy as np
import pandas as pd
from sklearn.utils import resample
import scipy
from scipy import stats
#
def filter_events_time_concat(data_frame, window_length):
"""
Returns a dataframe with concatenated recordings
It also calculates the average preceding activity without the leak (not used)
Used to find which animals satisfy the threshold on number of H-events to
be included in the analysis
Parameters
----------
data_frame: DataFrame
this is the general data frame with events, from the excel spreadsheet
window_length: float
maximum window to look back at each H-event to look for previous
spontaneous activity events. Notice that recordings are concatenated
Returns
-------
dataframe
contains only H-events and average preceding activity
"""
amps_H = []
amps_previous = []
list_name = []
list_num_H_window = []
list_age = []
list_time_since_event = []
sum_amp_df = pd.DataFrame(columns = ['Individual_name', 'Animal_age', 'Avg_pre_H', 'Amp_H', 'Time_since_last_event'])
for kk in range(len(data_frame)):
if data_frame['Event_type'][kk] == 'H':
recording_id = data_frame['Recording_concat'][kk]
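            # events in the same concatenated recording that started before this
            # H-event and ended no more than window_length before it, keeping
            # only events with at least 20% participation rate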
selected_L = data_frame[
(data_frame['Recording_concat'] == recording_id) &
(data_frame['Correct_start'][kk] - data_frame['Correct_end'] <= window_length) &
(data_frame['Correct_start'][kk] - data_frame['Correct_start'] > 0) &
(data_frame['Participation_rate'] >= 20)]
if ~np.isnan(np.mean(selected_L['Amplitude'])):
amps_H.append(data_frame['Amplitude'][kk])
amps_previous.append(np.mean(selected_L['Amplitude']))
list_name.append(data_frame['Individual_name'][kk])
list_age.append(data_frame['Animal_age'][kk])
list_time_since_event.append(np.min(data_frame['Correct_start'][kk] - selected_L['Correct_end']))
sum_amp_df['Individual_name'] = list_name
sum_amp_df['Animal_age'] = list_age
sum_amp_df['Avg_pre_H'] = amps_previous
sum_amp_df['Amp_H'] = amps_H
sum_amp_df['Time_since_last_event'] = list_time_since_event
return(sum_amp_df)
#
def animals_to_include(data_frame, threshold_count):
"""
Simply checks which animals satisfy the inclusion criteria
Parameters
----------
data_frame: DataFrame
this is the output dataframe from the filter_events_time_concat function
threshold_count: int
this is the threshold to include an animal in the analysis
Returns
-------
list
contains the animal IDs to be included in the analysis
"""
animals_in = []
for animal_id in np.unique(data_frame['Individual_name']):
total_H = len(data_frame[data_frame['Individual_name'] == animal_id])
if total_H >= threshold_count:
animals_in.append(animal_id)
return animals_in
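# Hedged usage sketch: the 300 s window and the 5-event threshold below are
# illustrative assumptions, not values taken from the paper.
#
#     events_df = pd.read_excel("events.xlsx")  # hypothetical spreadsheet
#     sum_amp_df = filter_events_time_concat(events_df, window_length=300.0)
#     included_ids = animals_to_include(sum_amp_df, threshold_count=5)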
#
def exp_decay(tau, time_range):
"""
A simple exponential decay
Parameters
----------
tau: float
decay time constant of the leak integrator
time_range: float
time interval to be applied the decay
Returns
-------
float
exponential decay
"""
decay = np.exp(- time_range / tau)
return decay
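# Example (illustrative numbers, not values from the data): an event that ended
# 30 s before an H-event, with a 60 s leak time constant, is weighted by
# exp(-30/60) ~= 0.61 of its amplitude:
#
#     >>> round(exp_decay(tau=60.0, time_range=30.0), 2)
#     0.61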
#
def make_bootstrap(df_boot):
"""
Bootstrap analysis
Here I fixed 1000 samples, for the 95% range
I used the resample function with replacement from sklearn
Parameters
----------
df_boot: DataFrame
The dataframe, output of the compute_corr_decay function
Returns
-------
list
confidence interval (upper and lower)
"""
total_samples = 1000
vec_corr_boots = []
for kk in range(total_samples):
aux_boot = resample(df_boot['Index_boot'], replace = True, n_samples = len(df_boot))
r2, rpval = scipy.stats.pearsonr(df_boot['Exp_avg_pre_H'][aux_boot], df_boot['Amp_H'][aux_boot])
vec_corr_boots.append(r2)
sorted_vec_corr_boots = np.sort(vec_corr_boots)
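    # with 1000 resamples, sorted positions 24 and 974 bracket the central 95%
    # (the 2.5th and 97.5th percentiles)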
return(sorted_vec_corr_boots[24], sorted_vec_corr_boots[974])
#
def computing_differences(data_frame):
"""
Returns the absolute difference in the activity across consecutive recordings
Not an elegant function.
Parameters
----------
data_frame: DataFrame
This has the information of the average activity per recording
Each animal has several recordings, just to keep in mind
Returns
-------
DataFrame
DataFrame with the absolute difference in recording activity for increasing
number of difference between consecutive recordings. E.g., difference in
activity of a recording with next one; difference in activity of a recording
with the second next one, and so on. I kept it up to 8
"""
rec_dist_one = []
rec_dist_two = []
rec_dist_three = []
rec_dist_four = []
rec_dist_five = []
rec_dist_six = []
rec_dist_seven = []
rec_dist_eight = []
for animal in np.unique(data_frame['ID']):
sub_df = data_frame[data_frame['ID'] == animal]
sub_df.reset_index(drop=True, inplace=True)
for ii in range(len(sub_df) - 1):
if (sub_df['Recording'][ii + 1] - sub_df['Recording'][ii]) == 1:
rec_dist_one.append(sub_df['Avg_activity'][ii + 1] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 1] - sub_df['Recording'][ii]) == 2:
rec_dist_two.append(sub_df['Avg_activity'][ii + 1] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 1] - sub_df['Recording'][ii]) == 3:
rec_dist_three.append(sub_df['Avg_activity'][ii + 1] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 1] - sub_df['Recording'][ii]) == 4:
rec_dist_four.append(sub_df['Avg_activity'][ii + 1] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 2):
if (sub_df['Recording'][ii + 2] - sub_df['Recording'][ii]) == 2:
rec_dist_two.append(sub_df['Avg_activity'][ii + 2] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 2] - sub_df['Recording'][ii]) == 3:
rec_dist_three.append(sub_df['Avg_activity'][ii + 2] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 2] - sub_df['Recording'][ii]) == 4:
rec_dist_four.append(sub_df['Avg_activity'][ii + 2] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 2] - sub_df['Recording'][ii]) == 5:
rec_dist_five.append(sub_df['Avg_activity'][ii + 2] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 3):
if (sub_df['Recording'][ii + 3] - sub_df['Recording'][ii]) == 3:
rec_dist_three.append(sub_df['Avg_activity'][ii + 3] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 3] - sub_df['Recording'][ii]) == 4:
rec_dist_four.append(sub_df['Avg_activity'][ii + 3] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 3] - sub_df['Recording'][ii]) == 5:
rec_dist_five.append(sub_df['Avg_activity'][ii + 3] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 3] - sub_df['Recording'][ii]) == 6:
rec_dist_six.append(sub_df['Avg_activity'][ii + 3] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 4):
if (sub_df['Recording'][ii + 4] - sub_df['Recording'][ii]) == 4:
rec_dist_four.append(sub_df['Avg_activity'][ii + 4] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 4] - sub_df['Recording'][ii]) == 5:
rec_dist_five.append(sub_df['Avg_activity'][ii + 4] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 4] - sub_df['Recording'][ii]) == 6:
rec_dist_six.append(sub_df['Avg_activity'][ii + 4] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 4] - sub_df['Recording'][ii]) == 7:
rec_dist_seven.append(sub_df['Avg_activity'][ii + 4] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 5):
if (sub_df['Recording'][ii + 5] - sub_df['Recording'][ii]) == 5:
rec_dist_five.append(sub_df['Avg_activity'][ii + 5] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 5] - sub_df['Recording'][ii]) == 6:
rec_dist_six.append(sub_df['Avg_activity'][ii + 5] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 5] - sub_df['Recording'][ii]) == 7:
rec_dist_seven.append(sub_df['Avg_activity'][ii + 5] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 5] - sub_df['Recording'][ii]) == 8:
rec_dist_eight.append(sub_df['Avg_activity'][ii + 5] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 6):
if (sub_df['Recording'][ii + 6] - sub_df['Recording'][ii]) == 6:
rec_dist_six.append(sub_df['Avg_activity'][ii + 6] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 6] - sub_df['Recording'][ii]) == 7:
rec_dist_seven.append(sub_df['Avg_activity'][ii + 6] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 6] - sub_df['Recording'][ii]) == 8:
rec_dist_eight.append(sub_df['Avg_activity'][ii + 6] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 7):
if (sub_df['Recording'][ii + 7] - sub_df['Recording'][ii]) == 7:
rec_dist_seven.append(sub_df['Avg_activity'][ii + 7] - sub_df['Avg_activity'][ii])
if (sub_df['Recording'][ii + 7] - sub_df['Recording'][ii]) == 8:
rec_dist_eight.append(sub_df['Avg_activity'][ii + 7] - sub_df['Avg_activity'][ii])
#
for ii in range(len(sub_df) - 8):
if (sub_df['Recording'][ii + 8] - sub_df['Recording'][ii]) == 8:
rec_dist_eight.append(sub_df['Avg_activity'][ii + 8] - sub_df['Avg_activity'][ii])
# now organizing this into a dictionary!
amp_diff_recordings = {}
amp_diff_recordings['1'] = rec_dist_one
amp_diff_recordings['2'] = rec_dist_two
amp_diff_recordings['3'] = rec_dist_three
amp_diff_recordings['4'] = rec_dist_four
amp_diff_recordings['5'] = rec_dist_five
amp_diff_recordings['6'] = rec_dist_six
amp_diff_recordings['7'] = rec_dist_seven
amp_diff_recordings['8'] = rec_dist_eight
# and preparing a dataframe out of it
amp_diff_recordings = | pd.DataFrame.from_dict(amp_diff_recordings, orient='index') | pandas.DataFrame.from_dict |
from collections import Counter
import pandas as pd
import sys
data =sys.argv[1] or open("POS.train", "r")
ready_sentence = []
sentence = []
tag_counter = Counter()
word_tag_counter = Counter()
for n in data:
f = n.split()
for m in f:
v = m.split("/")
sentence.append(v)
ready_sentence.append(sentence)
sentence = []
for q in ready_sentence:
for p in q:
        if p[0] in word_tag_counter:
            # word seen before: increment the count of this tag for the word
            word_tag_counter[p[0]][p[1]] += 1
else:
l = Counter()
l[p[1]] += 1
word_tag_counter[p[0]] = l
baseline_tags = {}
for n, b in word_tag_counter.items():
if isinstance(b, str):
baseline_tags[n] = b
else:
baseline_tags[n] = max(b.keys(), key=lambda tag: b[tag])
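# most-frequent-tag baseline: every known word is assigned its most common
# training tag; unseen words fall back to "NN" when tagging the test sentences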
test_data = sys.argv[2] or open("POS.test", "r")
test_set_sentence = []
ground_truth_tags = []
temp_sentence = []
temp_tag = []
for x in test_data:
y = x.split()
for z in y:
g = z.split("/")
temp_sentence.append(g[0])
temp_tag.append((g[1]))
test_set_sentence.append(temp_sentence)
ground_truth_tags.append(temp_tag)
temp_sentence = []
temp_tag = []
pred_sent = []
temp_pred = []
for x in test_set_sentence:
for y in x:
if y in baseline_tags.keys():
temp_pred.append(baseline_tags[y])
else:
temp_pred.append("NN")
pred_sent.append(temp_pred)
temp_pred = []
# accuracy with ground truth
test_out = []
test_out_temp = []
for a, b in zip(pred_sent, test_set_sentence):
for c, d in zip(a, b):
test_out_temp.append(c + "/" + d)
test_out.append(test_out_temp)
test_out_temp = []
tr = 0
fl = 0
for a, b in zip(pred_sent, ground_truth_tags):
for c, d in zip(a, b):
if c == d:
tr += 1
else:
fl += 1
print("true = ", tr, "false = ", fl, "score =", (tr / (tr + fl)))
df = | pd.DataFrame(test_out) | pandas.DataFrame |
"""This code implements the GEO mean predictor from the paper:
Estimating Query Representativeness for Query-Performance Prediction
by Sondak et al."""
import argparse
import pandas as pd
from qpputils import dataparser as dp
from Timer import Timer
parser = argparse.ArgumentParser(description='RSD(wig) predictor',
usage='Change the paths in the code in order to predict UQV/Base queries',
epilog='Generates the RSD predictor scores')
parser.add_argument('-c', '--corpus', default=None, help='The corpus to be used', choices=['ROBUST', 'ClueWeb12B'])
def geo_mean(qdb: dp.QueriesTextParser, probabilities_df: pd.DataFrame):
qdf = qdb.queries_df.set_index('qid')
qdf['qlen'] = qdf['text'].str.split().apply(len)
prob_qlen_df = probabilities_df.groupby('qid').count()
prob_prod_df = probabilities_df.groupby('qid').prod()
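    # queries with fewer probability entries than query terms get a zero mask,
    # so their product-based (geometric-mean style) score collapses to 0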
zeros_df = prob_qlen_df.subtract(qdf['qlen'], axis=0).applymap(lambda x: 0 if x < 0 else 1)
df = prob_prod_df.mul(zeros_df)
df = | pd.concat([df, qdf['qlen']], axis=1, sort=True) | pandas.concat |
import logging
import time
import pandas as pd
from .featurize import FeaturizedDataset
from .learn import RepairModel
from dataset import AuxTables
class RepairEngine:
def __init__(self, env, dataset):
self.ds = dataset
self.env = env
def setup_featurized_ds(self, featurizers, iteration_number=0):
tic = time.clock()
if iteration_number == 0:
self.feat_dataset = FeaturizedDataset(self.ds, self.env)
self.feat_dataset.create_features(featurizers, iteration_number)
toc = time.clock()
status = "DONE setting up featurized dataset."
feat_time = toc - tic
return status, feat_time
def setup_repair_model(self):
tic = time.clock()
feat_info = self.feat_dataset.featurizer_info
output_dim = self.feat_dataset.classes
self.repair_model = RepairModel(self.env, feat_info, output_dim, bias=self.env['bias'])
toc = time.clock()
status = "DONE setting up repair model."
setup_time = toc - tic
return status, setup_time
def fit_repair_model(self):
tic = time.clock()
if self.env['fusion']:
X_valid, Y_pred, mask_valid, valid_idx = self.feat_dataset.get_infer_data_fusion_validation()
X_train, Y_train, mask_train = self.feat_dataset.get_training_data()
logging.info('training with %d training examples (cells)', X_train.shape[0])
self.repair_model.fit_model(X_train, Y_train, mask_train, X_valid, Y_pred, mask_valid, valid_idx)
toc = time.clock()
status = "DONE training repair model."
train_time = toc - tic
return status, train_time
def infer_repairs(self):
tic = time.clock()
#GM
if self.env['fusion']:
X_pred, mask_pred, infer_idx = self.feat_dataset.get_infer_data_fusion_testing()
else:
X_pred, mask_pred, infer_idx = self.feat_dataset.get_infer_data()
Y_pred = self.repair_model.infer_values(X_pred, mask_pred)
if self.env['fusion']:
distr_df, infer_val_df = self.get_infer_dataframes_fusion(infer_idx, Y_pred)
else:
distr_df, infer_val_df = self.get_infer_dataframes(infer_idx, Y_pred)
self.ds.generate_aux_table(AuxTables.cell_distr, distr_df, store=True, index_attrs=['_vid_'])
self.ds.generate_aux_table(AuxTables.inf_values_idx, infer_val_df, store=True, index_attrs=['_vid_'])
toc = time.clock()
status = "DONE inferring repairs."
infer_time = toc - tic
return status, infer_time
def get_infer_dataframes(self, infer_idx, Y_pred):
distr = []
infer_val = []
Y_assign = Y_pred.data.numpy().argmax(axis=1)
domain_size = self.feat_dataset.var_to_domsize
# Need to map the inferred value index of the random variable to the actual value
# val_idx = val_id - 1 since val_id was numbered starting from 1 whereas
# val_idx starts at 0.
query = 'SELECT _vid_, val_id-1, rv_val FROM {pos_values}'.format(pos_values=AuxTables.pos_values.name)
pos_values = self.ds.engine.execute_query(query)
# dict mapping _vid_ --> val_idx --> value
vid_to_val = {}
for vid, val_idx, val in pos_values:
vid_to_val[vid] = vid_to_val.get(vid, {})
vid_to_val[vid][val_idx] = val
for idx in range(Y_pred.shape[0]):
vid = int(infer_idx[idx])
rv_distr = list(Y_pred[idx].data.numpy())
rv_val_idx = int(Y_assign[idx])
rv_val = vid_to_val[vid][rv_val_idx]
rv_prob = Y_pred[idx].data.numpy().max()
d_size = domain_size[vid]
distr.append({'_vid_': vid, 'distribution':[str(p) for p in rv_distr[:d_size]]})
infer_val.append({'_vid_': vid, 'inferred_val_idx': rv_val_idx, 'inferred_val': rv_val, 'prob':rv_prob})
distr_df = pd.DataFrame(data=distr)
infer_val_df = pd.DataFrame(data=infer_val)
return distr_df, infer_val_df
def get_featurizer_weights(self):
tic = time.clock()
report = self.repair_model.get_featurizer_weights(self.feat_dataset.featurizer_info)
toc = time.clock()
report_time = toc - tic
return report, report_time
# GM
def infer_repairs_fusion(self):
tic = time.clock()
X_pred, mask_pred, infer_idx = self.feat_dataset.get_infer_data_fusion()
Y_pred = self.repair_model.infer_values(X_pred, mask_pred)
# select predicted values with highest probability
distr_df, infer_val_df = self.get_infer_dataframes_fusion(infer_idx, Y_pred)
self.ds.generate_aux_table(AuxTables.cell_distr, distr_df, store=True, index_attrs=['_vid_']) # save distribution datafram to Dataset
self.ds.generate_aux_table(AuxTables.inf_values_idx, infer_val_df, store=True, index_attrs=['_vid_']) # save inferred values dataframe to Dataset object
toc = time.clock()
status = "DONE inferring repairs."
infer_time = toc - tic
return status, infer_time
def infer_repairs_fusion_validation(self):
tic = time.clock()
X_pred, Y, mask_pred, infer_idx = self.feat_dataset.get_infer_data_fusion_validation()
Y_pred = self.repair_model.infer_values(X_pred, mask_pred)
# select predicted values with highest probability
distr_df, infer_val_df = self.get_infer_dataframes_fusion(infer_idx, Y_pred)
self.ds.generate_aux_table(AuxTables.cell_distr, distr_df, store=True, index_attrs=['_vid_']) # save distribution datafram to Dataset
self.ds.generate_aux_table(AuxTables.inf_values_idx, infer_val_df, store=True, index_attrs=['_vid_']) # save inferred values dataframe to Dataset object
toc = time.clock()
status = "DONE inferring repairs."
infer_time = toc - tic
return status, infer_time
def get_infer_dataframes_fusion(self, infer_idx, Y_pred):
distr = []
infer_val = []
Y_assign = Y_pred.data.numpy().argmax(axis=1)
domain_size = self.feat_dataset.var_to_domsize
for idx in range(Y_pred.shape[0]):
vid = int(infer_idx[idx])
rv_distr = list(Y_pred[idx].data.numpy())
rv_value = int(Y_assign[idx])
rv_prob = Y_pred[idx].data.numpy().max()
d_size = domain_size[vid]
distr.append({'_vid_': vid, 'distribution':[str(p) for p in rv_distr[:d_size]]})
infer_val.append({'_vid_': vid, 'inferred_assignment':rv_value, 'prob':rv_prob})
distr_df = pd.DataFrame(data=distr)
        infer_val_df = pd.DataFrame(data=infer_val)
        return distr_df, infer_val_df
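# Example (added for illustration, not part of the original source): how the
# RepairEngine methods above are typically chained. `env`, `dataset` and
# `featurizers` are assumed to be built elsewhere by the surrounding framework.
def _example_repair_pipeline(env, dataset, featurizers):
    engine = RepairEngine(env, dataset)
    engine.setup_featurized_ds(featurizers)
    engine.setup_repair_model()
    engine.fit_repair_model()
    engine.infer_repairs()
    report, _ = engine.get_featurizer_weights()
    return report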
"""
Contains the ligand similarity search class.
"""
from pathlib import Path
from typing_extensions import ParamSpecKwargs
import pandas as pd # for creating dataframes and handling data
from .consts import Consts
from .ligand import Ligand
from .helpers import pubchem, rdkit
class LigandSimilaritySearch:
"""
Automated ligand similarity-search process of the pipeline.
    Take in the input Ligand object, the Specs.LigandSimilaritySearch object,
and the corresponding output path, and automatically run all the necessary
processes to output a set of analogs with the highest drug-likeness scores.
Attributes
----------
TODO
all_analogs
"""
def __init__(
self,
ligand_obj,
similarity_search_specs_obj,
similarity_search_output_path,
frozen_data_filepath=None,
):
"""
Initialize the ligand similarity search.
Parameters
----------
ligand_obj : utils.Ligand
The Ligand object of the project.
similarity_search_specs_obj : utils.Specs.LigandSimilaritySearch
The similarity search specification data-class of the project.
similarity_search_output_path : str or pathlib.Path
Output path of the project's similarity search information.
frozen_data_filepath : str or pathlib.Path
If existing data is to be used, provide the path to a csv file
containing the columns "CID" and "CanonicalSMILES" for the analogs.
"""
similarity_search_output_path = Path(similarity_search_output_path)
if not frozen_data_filepath is None:
all_analog_identifiers_df = pd.read_csv(frozen_data_filepath)
elif (
similarity_search_specs_obj.search_engine
is Consts.LigandSimilaritySearch.SearchEngines.PUBCHEM
):
analogs_info = pubchem.similarity_search(
ligand_obj.smiles,
similarity_search_specs_obj.min_similarity_percent,
similarity_search_specs_obj.max_num_results,
)
            all_analog_identifiers_df = pd.DataFrame(analogs_info)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Analyze the SRAG data and export the statistics to generate figure 1
Needs the filter_SRAG.py csv output to run
"""
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from scipy.stats import norm, binom
def median_estimate(X, CI):
n = len(X)
lmd = binom.ppf((1-CI)/2, n, 0.5)
mmd = binom.ppf((1+CI)/2, n, 0.5)
Xo = np.sort(X)
return np.median(Xo), Xo[int(lmd)], Xo[int(mmd)-1]
def freq_estimate(X, CI):
n = len(X)
P = (X==True).sum()
lmd = binom.ppf((1-CI)/2, n, P/n)
mmd = binom.ppf((1+CI)/2, n, P/n)
return P/n, lmd/n, mmd/n
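# Example (added for illustration, not in the original script): a quick sanity
# check of the two estimators above on synthetic data. Kept inside a function so
# importing this module stays side-effect free.
def _demo_estimators():
    x = np.random.normal(size=1000)
    med, md_lo, md_hi = median_estimate(x, 0.95)    # median with 95% CI bounds
    frq, fq_lo, fq_hi = freq_estimate(x > 0, 0.95)  # frequency of positives with 95% CI
    return (med, md_lo, md_hi), (frq, fq_lo, fq_hi)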
def create_filter_cont(data, ycol, xcols, fname, col_extra=None, CI=0.95):
lme = norm.ppf((1-CI)/2)
mme = norm.ppf((1+CI)/2)
data = data[~pd.isna(data[ycol])]
saida = {'name': [], 'mean': [], 'CIme_L':[], 'CIme_H':[], 'median':[], \
'CImd_L':[], 'CImd_H':[]}
saida['name'].append('All')
saida['mean'].append(np.mean(data[ycol]))
saida['CIme_L'].append(np.mean(data[ycol]) + lme*np.std(data[ycol])/len(data[ycol]))
saida['CIme_H'].append(np.mean(data[ycol]) + mme*np.std(data[ycol])/len(data[ycol]))
med, cl, ch = median_estimate(data[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
if col_extra != None:
for val_extra in data[col_extra].unique():
data_extra = data[data[col_extra]==val_extra]
saida['name'].append('All_'+str(val_extra))
saida['mean'].append(np.mean(data_extra[ycol]))
saida['CIme_L'].append(np.mean(data_extra[ycol]) + lme*np.std(data_extra[ycol])/len(data_extra[ycol]))
saida['CIme_H'].append(np.mean(data_extra[ycol]) + mme*np.std(data_extra[ycol])/len(data_extra[ycol]))
med, cl, ch = median_estimate(data_extra[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
for xcol in xcols:
for val in data[xcol].unique():
if val is np.nan:
data_fil = data[pd.isna(data[xcol])]
else:
data_fil = data[data[xcol]==val]
data_fil = data_fil[~pd.isna(data_fil[ycol])]
saida['name'].append(str(xcol)+'_'+str(val))
saida['mean'].append(np.mean(data_fil[ycol]))
saida['CIme_L'].append(np.mean(data_fil[ycol]) + lme*np.std(data_fil[ycol])/len(data_fil[ycol]))
saida['CIme_H'].append(np.mean(data_fil[ycol]) + mme*np.std(data_fil[ycol])/len(data_fil[ycol]))
med, cl, ch = median_estimate(data_fil[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
if col_extra != None:
for val_extra in data_fil[col_extra].unique():
data_extra = data_fil[data_fil[col_extra]==val_extra]
saida['name'].append(str(xcol)+'_'+str(val)+'_'+str(val_extra))
saida['mean'].append(np.mean(data_extra[ycol]))
saida['CIme_L'].append(np.mean(data_extra[ycol]) + lme*np.std(data_extra[ycol])/len(data_extra[ycol]))
saida['CIme_H'].append(np.mean(data_extra[ycol]) + mme*np.std(data_extra[ycol])/len(data_extra[ycol]))
med, cl, ch = median_estimate(data_extra[ycol], CI)
saida['median'].append(med)
saida['CImd_L'].append(cl)
saida['CImd_H'].append(ch)
saida = pd.DataFrame(saida)
saida.to_csv(fname, index=False)
def create_filter_binary(data, ycol, xcols, fname, CI=0.95):
lme = norm.ppf((1-CI)/2)
mme = norm.ppf((1+CI)/2)
    data = data[~pd.isna(data[ycol])]
import logging
import geopandas as gpd
import numpy as np
import pandas as pd
from shapely.geometry import LineString
from rasterstats import zonal_stats
from delft3dfmpy.core import checks, geometry
from delft3dfmpy.datamodels.common import ExtendedDataFrame
import rasterio
import warnings
from rasterio.transform import from_origin
import os
import imod
from tqdm.auto import tqdm
import logging
logger = logging.getLogger(__name__)
def generate_unpaved(catchments, landuse, surface_level, soiltype, surface_storage, infiltration_capacity, initial_gwd, meteo_areas, zonalstats_alltouched=None):
"""
    Combine all data to form a complete UNPAVED definition. Also the coordinates for the network topology are included.
    Zonal statistics are applied to land use to get the areas per type. The classification described in the notebook is assumed. From the elevation grid, the median value per catchment is used, and for soil type the dominant soil type in the catchment is used. Other parameters can be prescribed as a float (spatially uniform) or as a raster name, in which case the mean value per catchment is used.
"""
all_touched=False if zonalstats_alltouched is None else zonalstats_alltouched
# required rasters
warnings.filterwarnings('ignore')
lu_rast, lu_affine = read_raster(landuse, static=True)
lu_counts = zonal_stats(catchments, lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)
rast, affine = read_raster(soiltype, static=True)
soiltypes = zonal_stats(catchments, soiltype, affine = affine, stats='majority',all_touched=all_touched)
rast, affine = read_raster(surface_level, static=True)
mean_elev = zonal_stats(catchments, rast, affine=affine, stats="median",all_touched=all_touched)
# optional rasters
if isinstance(surface_storage, str):
rast,affine = read_raster(surface_storage, static=True)
sstores = zonal_stats(catchments, rast, affine=affine, stats="mean",all_touched=True)
elif isinstance(surface_storage,int):
surface_storage = float(surface_storage)
if isinstance(infiltration_capacity, str):
rast,affine = read_raster(infiltration_capacity, static=True)
infcaps = zonal_stats(catchments, rast, affine=affine, stats="mean",all_touched=True)
elif isinstance(infiltration_capacity,int):
infiltration_capacity = float(infiltration_capacity)
if isinstance(initial_gwd, str):
rast,affine = read_raster(initial_gwd, static=True)
ini_gwds = zonal_stats(catchments, rast, affine=affine, stats="mean", all_touched=True)
elif isinstance(initial_gwd,int):
initial_gwd = float(initial_gwd)
# get raster cellsize
px_area = lu_affine[0] * -lu_affine[4]
unpaved_drr = ExtendedDataFrame(required_columns=['code'])
unpaved_drr.set_data( pd.DataFrame(np.zeros((len(catchments),12)),
columns=['code','total_area','lu_areas','mvlevel',
'soiltype','surstor','infcap','initial_gwd','meteostat','px','py','boundary'
], dtype="str"), index_col='code')
unpaved_drr.index = catchments.code
# HyDAMO Crop code; hydamo name, sobek index, sobek name:
# 1 aardappelen 3 potatoes
# 2 graan 5 grain
# 3 suikerbiet 4 sugarbeet
# 4 mais 2 corn
# 5 overige gew. 15 vegetables
# 6 bloembollen 10 bulbous plants
# 7 boomgaard 9 orchard
# 8 gras 1 grass
    # 9   loofbos       11   deciduous
    # 10  naaldbos      12   coniferous
    # 11  natuur        13   nature
# 12 braak 14 fallow
sobek_indices = [3,5,4,2,15,10,9,1,11,12,13,14]
for num, cat in enumerate(catchments.itertuples()):
# if no rasterdata could be obtained for this catchment, skip it.
if mean_elev[num]['median'] is None:
logger.warning('No rasterdata available for catchment %s' % cat.code)
continue
tm = [m for m in meteo_areas.itertuples() if m.geometry.contains(cat.geometry.centroid)]
ms = meteo_areas.iloc[0,:][0] if tm==[] else tm[0].code
# find corresponding meteo-station
#ms = [ms for ms in meteo_areas.itertuples() if ms.geometry.contains(cat.geometry.centroid)]
#ms = ms[0] if ms != [] else meteo_areas.iloc[0,:][0]
mapping = np.zeros(16, dtype=int)
for i in range(1,13):
if i in lu_counts[num]: mapping[sobek_indices[i-1]-1] = lu_counts[num][i]*px_area
lu_map = ' '.join(map(str,mapping))
elev = mean_elev[num]['median']
unpaved_drr.at[cat.code, 'code'] = str(cat.code)
unpaved_drr.at[cat.code, 'total_area'] = f'{cat.geometry.area:.0f}'
unpaved_drr.at[cat.code, 'lu_areas'] = lu_map
unpaved_drr.at[cat.code, 'mvlevel'] = f'{elev:.2f}'
unpaved_drr.at[cat.code, 'soiltype'] =f'{soiltypes[num]["majority"]+100.:.0f}'
if isinstance(surface_storage, float):
unpaved_drr.at[cat.code, 'surstor'] = f'{surface_storage:.3f}'
else:
unpaved_drr.at[cat.code, 'surstor'] = f'{sstores[num]["mean"]:.3f}'
if isinstance(infiltration_capacity, float):
unpaved_drr.at[cat.code, 'infcap'] = f'{infiltration_capacity:.3f}'
else:
unpaved_drr.at[cat.code, 'infcap'] = f'{infcaps[num]["mean"]:.3f}'
if isinstance(initial_gwd, float):
unpaved_drr.at[cat.code, 'initial_gwd'] = f'{initial_gwd:.2f}'
else:
unpaved_drr.at[cat.code, 'initial_gwd'] = f'{ini_gwds[num]["mean"]:.2f}'
unpaved_drr.at[cat.code, 'meteostat'] = ms
unpaved_drr.at[cat.code, 'px'] = f'{cat.geometry.centroid.coords[0][0]-10:.0f}'
unpaved_drr.at[cat.code, 'py'] = f'{cat.geometry.centroid.coords[0][1]:.0f}'
unpaved_drr.at[cat.code, 'boundary'] = cat.lateraleknoopcode
return unpaved_drr
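# Illustration (added, not in the original module): how the HyDAMO -> Sobek
# land-use mapping used above turns zonal-statistics pixel counts into the
# 16-element area string of the UNPAVED definition. Counts and cell size are
# made up for the example.
def _demo_landuse_mapping():
    sobek_indices = [3, 5, 4, 2, 15, 10, 9, 1, 11, 12, 13, 14]
    px_area = 25.0                      # assumed 5 m x 5 m cells
    lu_count = {8: 120, 2: 40}          # HyDAMO codes: 8 = grass, 2 = grain
    mapping = np.zeros(16, dtype=int)
    for i in range(1, 13):
        if i in lu_count:
            mapping[sobek_indices[i - 1] - 1] = lu_count[i] * px_area
    return ' '.join(map(str, mapping))  # grass area lands in slot 1, grain in slot 5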
def generate_ernst(catchments, depths, resistance, infiltration_resistance, runoff_resistance):
"""
    The lists with depths and resistances, as well as the standard infiltration and runoff resistances, are converted to a dataframe.
"""
ernst_drr = ExtendedDataFrame(required_columns=['code'])
ernst_drr.set_data( pd.DataFrame(np.zeros((len(catchments),5)),
columns=['code','reslist','lvs','cvi','cvs'], dtype="str"), index_col='code')
ernst_drr.index = catchments.code
for num, cat in enumerate(catchments.itertuples()):
ernst_drr.at[cat.code, 'code'] = str(cat.code)
ernst_drr.at[cat.code, 'reslist'] = ' '.join([str(res) for res in resistance])
ernst_drr.at[cat.code, 'lvs'] = ' '.join([str(depth) for depth in depths])
ernst_drr.at[cat.code, 'cvi'] = str(infiltration_resistance)
ernst_drr.at[cat.code, 'cvs'] = str(runoff_resistance)
return ernst_drr
def generate_paved( catchments=None,
overflows=None,
sewer_areas=None,
landuse=None,
surface_level=None,
street_storage=None,
sewer_storage=None,
pump_capacity=None,
meteo_areas=None,
zonalstats_alltouched=None):
"""
    Combine all data to form a complete PAVED definition. Also the coordinates for the network topology are included.
    Zonal statistics are applied to land use to get the paved area. The classification described in the notebook is assumed. From the elevation grid, the median value per catchment is used. Other parameters can be prescribed as a float (spatially uniform) or as a raster name, in which case the mean value per catchment is used.
"""
all_touched=False if zonalstats_alltouched is None else zonalstats_alltouched
lu_rast, lu_affine = read_raster(landuse, static=True)
lu_counts = zonal_stats(catchments, lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)
sl_rast, sl_affine = read_raster(surface_level, static=True)
mean_elev = zonal_stats(catchments, sl_rast, affine=sl_affine, stats="median",all_touched=all_touched)
if isinstance( street_storage, str):
strs_rast, strs_affine = read_raster(street_storage, static=True)
str_stors = zonal_stats(catchments, strs_rast, affine=strs_affine, stats="mean", all_touched=True)
elif isinstance( street_storage, int):
street_storage = float(street_storage)
if isinstance( sewer_storage, str):
sews_rast, sews_affine = read_raster(sewer_storage, static=True)
sew_stors = zonal_stats(catchments, sews_rast, affine=sews_affine, stats="mean", all_touched=True)
elif isinstance( sewer_storage, int):
sewer_storage = float(sewer_storage)
if isinstance(pump_capacity, str):
pump_rast, pump_affine = read_raster(pump_capacity, static=True)
pump_caps = zonal_stats(catchments, pump_rast, affine=pump_affine, stats="mean", all_touched=True)
elif isinstance( pump_capacity, int):
pump_capacity = float(pump_capacity)
def update_dict(dict1, dict2):
for i in dict2.keys():
if i in dict1:
dict1[i]+=dict2[i]
else:
dict1[i] = dict2[i]
return dict1
# get raster cellsize
px_area = lu_affine[0] * -lu_affine[4]
paved_drr = ExtendedDataFrame(required_columns=['code'])
if sewer_areas is not None:
# if the parameters area rasters, do the zonal statistics per sewage area as well.
if isinstance( street_storage, str):
str_stors_sa = zonal_stats(sewer_areas, strs_rast,affine=strs_affine,stats="mean", all_touched=True)
if isinstance( sewer_storage, str):
sew_stors_sa = zonal_stats(sewer_areas, sews_rast,affine=sews_affine,stats="mean", all_touched=True)
if isinstance(pump_capacity, str):
pump_caps_sa = zonal_stats(sewer_areas, pump_rast,affine=pump_affine,stats="mean", all_touched=True)
mean_sa_elev = zonal_stats(sewer_areas, sl_rast, affine=sl_affine, stats="median",all_touched=True)
# initialize the array of paved nodes, which should contain a node for all catchments and all overflows
paved_drr.set_data( pd.DataFrame(np.zeros((len(catchments)+len(overflows),10)),
columns=['code','area','mvlevel', 'streetstor', 'sewstor', 'pumpcap','meteostat','px', 'py', 'boundary'], dtype="str"), index_col='code')
paved_drr.index = catchments.code.append(overflows.code)
# find the paved area in the sewer areas
for isew, sew in enumerate(sewer_areas.itertuples()):
pav_area = 0
for cat_ind, cat in enumerate(catchments.itertuples()):
# if no rasterdata could be obtained for this catchment, skip it.
if mean_elev[cat_ind]['median'] is None:
logger.warning('No rasterdata available for catchment %s' % cat.code)
continue
if(cat.geometry.intersects(sew.geometry)):
test_intersect = cat.geometry.intersection(sew.geometry)
#print(cat.Index+' '+sew.Index+' '+test_intersect.type)
if test_intersect.type =='LineString':
logger.warning('Intersection in %s contains of LineStrings, not polygons. Skipping. '% cat.code)
continue
if test_intersect.type=='GeometryCollection':
numpol = 0
logger.info('Intersection in %s contains a GeometryCollection - splitting into polygons.'% cat.code)
for int_ft in test_intersect:
if int_ft.type == 'Polygon':
if numpol==0:
intersecting_pixels = zonal_stats(int_ft, lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)[0]
else:
temp_int = zonal_stats(int_ft, lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)[0]
intersecting_pixels = update_dict(intersecting_pixels, temp_int)
numpol += 1
else:
# find the paved area within the intersection and add it to the sewer area sum
intersecting_pixels = zonal_stats(cat.geometry.intersection(sew.geometry), lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)[0]
if intersecting_pixels=={}:
continue
if 14.0 not in intersecting_pixels:
logger.warning('%s/%s: no paved area in sewer area intersection!' % (sew.code, cat.code))
continue
pav_pixels = intersecting_pixels[14.0]
pav_area += pav_pixels*px_area
                    # subtract it from the total paved area in this catchment, make sure at least 0 remains
lu_counts[cat_ind][14.0] -= pav_pixels
if lu_counts[cat_ind][14.0] < 0: lu_counts[cat_ind][14.0] = 0
elev = mean_sa_elev[isew]['median']
# find overflows related to this sewer area
ovf = overflows[overflows.codegerelateerdobject==sew.code]
for ov in ovf.itertuples():
# find corresponding meteo-station
tm = [m for m in meteo_areas.itertuples() if m.geometry.contains(sew.geometry.centroid)]
ms = meteo_areas.iloc[0,:][0] if tm==[] else tm[0].code
#ms = ms[0] if ms != [] else meteo_areas.iloc[0,:][0]
# add prefix to the overflow id to create the paved-node id
paved_drr.at[ov.code, 'code'] = str(ov.code)
paved_drr.at[ov.code, 'area'] = str(pav_area * ov.fractie)
paved_drr.at[ov.code, 'mvlevel'] = f'{elev:.2f}'
# if a float is given, a standard value is passed. If a string is given, a rastername is assumed to zonal statistics are applied.
if isinstance(street_storage, float):
paved_drr.at[ov.code, 'streetstor'] = f'{street_storage:.2f}'
else:
paved_drr.at[ov.code, 'streetstor'] = f'{str_stors_sa[isew]["mean"]:.2f}'
if isinstance(sewer_storage, float):
paved_drr.at[ov.code, 'sewstor'] = f'{sewer_storage:.2f}'
else:
paved_drr.at[ov.code, 'sewstor'] = f'{sew_stors_sa[isew]["mean"]:.2f}'
if isinstance(pump_capacity, float):
paved_drr.at[ov.code, 'pumpcap'] = f'{pump_capacity}'
else:
paved_drr.at[ov.code, 'pumpcap'] = f'{pump_caps_sa[isew]["mean"]:.2f}'
paved_drr.at[ov.code,'meteostat'] = ms
paved_drr.at[ov.code, 'px'] = f'{ov.geometry.coords[0][0]+10:.0f}'
paved_drr.at[ov.code, 'py'] = f'{ov.geometry.coords[0][1]:.0f}'
paved_drr.at[ov.code, 'boundary'] = ov.code
else:
# in this case only the catchments are taken into account. A node is created for every catchment nonetheless, but only nodes with a remaining area >0 are written.
paved_drr.set_data( pd.DataFrame(np.zeros((len(catchments),10)),
columns=['code','area','mvlevel', 'streetstor', 'sewstor', 'pumpcap','meteostat', 'px', 'py', 'boundary'], dtype="str"), index_col='code')
paved_drr.index = catchments.code
for num, cat in enumerate(catchments.itertuples()):
# if no rasterdata could be obtained for this catchment, skip it.
if mean_elev[num]['median'] is None:
logger.warning('No rasterdata available for catchment %s' % cat.code)
continue
# find corresponding meteo-station
tm = [m for m in meteo_areas.itertuples() if m.geometry.contains(cat.geometry.centroid)]
ms = meteo_areas.iloc[0,:][0] if tm==[] else tm[0].code
elev = mean_elev[num]['median']
paved_drr.at[cat.code, 'code'] = str(cat.code)
paved_drr.at[cat.code, 'area'] = str(lu_counts[num][14]*px_area) if 14 in lu_counts[num] else '0'
paved_drr.at[cat.code, 'mvlevel'] = f'{elev:.2f}'
# if a float is given, a standard value is passed. If a string is given, a rastername is assumed to zonal statistics are applied.
if isinstance(street_storage, float):
paved_drr.at[cat.code, 'streetstor'] = f'{street_storage:.2f}'
else:
paved_drr.at[cat.code, 'streetstor'] = f'{str_stors[num]["mean"]:.2f}'
if isinstance(sewer_storage, float):
paved_drr.at[cat.code, 'sewstor'] = f'{sewer_storage:.2f}'
else:
paved_drr.at[cat.code, 'sewstor'] = f'{sew_stors[num]["mean"]:.2f}'
if isinstance(pump_capacity, float):
paved_drr.at[cat.code, 'pumpcap'] = f'{pump_capacity}'
else:
paved_drr.at[cat.code, 'pumpcap'] = f'{pump_caps[num]["mean"]:.2f}'
paved_drr.at[cat.code,'meteostat'] = ms
paved_drr.at[cat.code, 'px'] = f'{cat.geometry.centroid.coords[0][0]+10:.0f}'
paved_drr.at[cat.code, 'py'] = f'{cat.geometry.centroid.coords[0][1]:.0f}'
paved_drr.at[cat.code, 'boundary'] = cat.lateraleknoopcode
return paved_drr
def generate_greenhouse(catchments, landuse, surface_level, roof_storage, meteo_areas, zonalstats_alltouched=None):
"""
    Combine all data to form a complete GREENHSE definition. Also the coordinates for the network topology are included.
    Zonal statistics are applied to land use to get the greenhouse area. The classification described in the notebook is assumed. From the elevation grid, the median value per catchment is used. Other parameters can be prescribed as a float (spatially uniform) or as a raster name, in which case the mean value per catchment is used.
"""
all_touched=False if zonalstats_alltouched is None else zonalstats_alltouched
lu_rast, lu_affine = read_raster(landuse, static=True)
lu_counts = zonal_stats(catchments, lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)
rast, affine = read_raster(surface_level, static=True)
mean_elev = zonal_stats(catchments, rast, affine=affine, stats="median", all_touched=all_touched)
# optional rasters
if isinstance(roof_storage, str):
rast, affine = read_raster(roof_storage, static=True)
roofstors = zonal_stats(catchments, rast, affine=affine, stats="mean", all_touched=True)
elif isinstance(roof_storage, int):
roof_storage = float(roof_storage)
# get raster cellsize
px_area = lu_affine[0] * -lu_affine[4]
gh_drr = ExtendedDataFrame(required_columns=['code'])
gh_drr.set_data( pd.DataFrame(np.zeros((len(catchments),8)),
columns=['code','area','mvlevel', 'roofstor', 'meteostat','px', 'py', 'boundary'], dtype="str"), index_col='code')
gh_drr.index = catchments.code
for num, cat in enumerate(catchments.itertuples()):
# if no rasterdata could be obtained for this catchment, skip it.
if mean_elev[num]['median'] is None:
logger.warning('No rasterdata available for catchment %s' % cat.code)
continue
# find corresponding meteo-station
tm = [m for m in meteo_areas.itertuples() if m.geometry.contains(cat.geometry.centroid)]
ms = meteo_areas.iloc[0,:][0] if tm==[] else tm[0].code
elev = mean_elev[num]['median']
gh_drr.at[cat.code, 'code'] = str(cat.code)
gh_drr.at[cat.code, 'area'] = str(lu_counts[num][15]*px_area) if 15 in lu_counts[num] else '0'
gh_drr.at[cat.code, 'mvlevel'] = f'{elev:.2f}'
if isinstance(roof_storage, float):
gh_drr.at[cat.code, 'roofstor'] = f'{roof_storage:.2f}'
else:
gh_drr.at[cat.code, 'roofstor'] = f'{roofstors[num]["mean"]:.2f}'
gh_drr.at[cat.code, 'meteostat'] = ms
gh_drr.at[cat.code, 'px'] = f'{cat.geometry.centroid.coords[0][0]+20:.0f}'
gh_drr.at[cat.code, 'py'] = f'{cat.geometry.centroid.coords[0][1]:.0f}'
gh_drr.at[cat.code, 'boundary'] = cat.lateraleknoopcode
return gh_drr
def generate_openwater(catchments, landuse, meteo_areas, zonalstats_alltouched=None):
"""
    Combine all data to form a complete OPENWATE definition. Also the coordinates for the network topology are included.
    Zonal statistics are applied to land use to get the open water area. The classification described in the notebook is assumed.
"""
all_touched=False if zonalstats_alltouched is None else zonalstats_alltouched
lu_rast, lu_affine = read_raster(landuse, static=True)
lu_counts = zonal_stats(catchments, lu_rast, affine=lu_affine, categorical=True, all_touched=all_touched)
# get raster cellsize
px_area = lu_affine[0] * -lu_affine[4]
ow_drr = ExtendedDataFrame(required_columns=['code'])
ow_drr.set_data( pd.DataFrame(np.zeros((len(catchments),6)),
columns=['code','area','meteostat','px', 'py', 'boundary'], dtype="str"), index_col='code')
ow_drr.index = catchments.code
for num, cat in enumerate(catchments.itertuples()):
# find corresponding meteo-station
tm = [m for m in meteo_areas.itertuples() if m.geometry.contains(cat.geometry.centroid)]
ms = meteo_areas.iloc[0,:][0] if tm==[] else tm[0].code
ow_drr.at[cat.code, 'code'] = str(cat.code)
ow_drr.at[cat.code, 'area'] = str(lu_counts[num][13]*px_area) if 13 in lu_counts[num] else '0'
ow_drr.at[cat.code, 'meteostat'] = ms
ow_drr.at[cat.code, 'px'] = f'{cat.geometry.centroid.coords[0][0]-20:.0f}'
ow_drr.at[cat.code, 'py'] = f'{cat.geometry.centroid.coords[0][1]:.0f}'
ow_drr.at[cat.code, 'boundary'] = cat.lateraleknoopcode
return ow_drr
def generate_boundary(boundary_nodes, catchments, drrmodel, overflows=None):
"""
Method to create boundary nodes for RR.
"""
# find the catchments that have no area attached and no nodes that will be attached to the boundary
not_occurring = []
for cat in catchments.itertuples():
occurs = False
if cat.lateraleknoopcode in [val['boundary_node'] for val in drrmodel.unpaved.unp_nodes.values() if np.sum([float(d) for d in val['ar'].split(' ')]) > 0.0]:
occurs = True
if cat.lateraleknoopcode in [val['boundary_node'] for val in drrmodel.paved.pav_nodes.values() if float(val['ar']) > 0.0]:
occurs = True
if cat.lateraleknoopcode in [val['boundary_node'] for val in drrmodel.greenhouse.gh_nodes.values() if float(val['ar']) > 0.0]:
occurs = True
if cat.lateraleknoopcode in [val['boundary_node'] for val in drrmodel.openwater.ow_nodes.values() if float(val['ar']) > 0.0]:
occurs = True
if occurs== False:
not_occurring.append(cat.lateraleknoopcode)
for i in not_occurring:
catchments.drop(catchments[catchments.lateraleknoopcode==i].code.iloc[0], axis=0, inplace=True)
if overflows is not None:
numlats = len(catchments)+len(overflows)
else:
numlats = len(catchments)
bnd_drr = ExtendedDataFrame(required_columns=['code'])
bnd_drr.set_data( pd.DataFrame(np.zeros((numlats,3)),
columns=['code', 'px', 'py'], dtype="str"), index_col='code')
if overflows is not None:
bnd_drr.index = catchments.code.append(overflows.code)
else:
bnd_drr.index = catchments.code
for num, cat in enumerate(catchments.itertuples()):
# print(num, cat.code)
if boundary_nodes[boundary_nodes['code']==cat.lateraleknoopcode].empty:
#raise IndexError(f'{cat.code} not connected to a boundary node. Skipping.')
logger.warning('%s not connected to a boundary node. Skipping.' % cat.code)
continue
bnd_drr.at[cat.code, 'code'] = cat.lateraleknoopcode
bnd_drr.at[cat.code, 'px'] = str(boundary_nodes[boundary_nodes['code']==cat.lateraleknoopcode]['geometry'].x.iloc[0]).strip()
bnd_drr.at[cat.code, 'py'] = str(boundary_nodes[boundary_nodes['code']==cat.lateraleknoopcode]['geometry'].y.iloc[0]).strip()
if overflows is not None:
logger.info('Adding overflows to the boundary nodes.')
for num, ovf in enumerate(overflows.itertuples()):
bnd_drr.at[ovf.code, 'code'] = ovf.code
bnd_drr.at[ovf.code, 'px'] = str(ovf.geometry.coords[0][0])
bnd_drr.at[ovf.code, 'py'] = str(ovf.geometry.coords[0][1])
return bnd_drr
def generate_seepage(catchments, seepage_folder):
"""
Method to obtain catchment-average seepage fluxes from rasters. The time step is deduced from the raster filenames.
We assume seepage is read from Metaswap (m3 per cell). It needs to be converted to mm/day.
"""
warnings.filterwarnings('ignore')
file_list = os.listdir(seepage_folder)
times = []
arr = np.zeros((len(file_list), len(catchments.code)))
for ifile, file in tqdm(enumerate(file_list),total=len(file_list),desc='Reading seepage files'):
array, affine, time = read_raster(os.path.join(seepage_folder, file))
times.append(time)
stats = zonal_stats(catchments, array, affine=affine, stats="mean", all_touched=True)
arr[ifile,:] = [s['mean'] for s in stats]
result = pd.DataFrame(arr,columns='sep_'+catchments.code)
result.index = times
# convert units
result_mmd = (result / (1e-3*(affine[0]*-affine[4])))/((times[2]-times[1]).total_seconds()/86400.)
return result_mmd
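# Worked example (added for clarity): with 25 m x 25 m cells (625 m2) and a daily
# time step, a Metaswap flux of 0.625 m3 per cell per step becomes
#   0.625 / (1e-3 * 625) / 1.0 = 1.0 mm/day,
# which is exactly the unit conversion applied above.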
def generate_precip(areas, precip_folder):
"""
    Method to obtain area-averaged precipitation fluxes from rasters. The time step is deduced from the raster filenames.
"""
warnings.filterwarnings('ignore')
file_list = os.listdir(precip_folder)
times = []
arr = np.zeros((len(file_list), len(areas.code)))
for ifile, file in tqdm(enumerate(file_list),total=len(file_list),desc='Reading precipitation files'):
array, affine, time = read_raster(os.path.join(precip_folder, file))
times.append(time)
stats = zonal_stats(areas, array, affine=affine, stats="mean", all_touched=True)
arr[ifile,:]= [s['mean'] for s in stats]
result = pd.DataFrame(arr, columns='ms_'+areas.code)
result.index = times
return result
def generate_evap(areas, evap_folder):
"""
    Method to obtain catchment-average evaporation fluxes from rasters. The time step is deduced from the raster filenames. Since only one time series is allowed, the meteo areas are dissolved using a user-specified field.
"""
warnings.filterwarnings('ignore')
file_list = os.listdir(evap_folder)
# aggregated evap
areas['dissolve'] = 1
agg_areas = areas.iloc[0:len(areas),:].dissolve(by='dissolve',aggfunc='mean')
times = []
arr = np.zeros((len(file_list), 1))
for ifile, file in tqdm(enumerate(file_list),total=len(file_list),desc='Reading evaporation files'):
array, affine, time = read_raster(os.path.join(evap_folder, file))
times.append(time)
stats = zonal_stats(agg_areas, array, affine=affine, stats="mean",all_touched=True)
arr[ifile,:] = [s['mean'] for s in stats]
    result = pd.DataFrame(arr, columns=['ms_'+areas.iloc[0, 0]])
    result.index = times
    return result
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
#The Data
with open('kddcup.names', 'r') as infile:
kdd_names = infile.readlines()
kdd_cols = [x.split(':')[0] for x in kdd_names[1:]]
kdd_cols += ['class', 'difficulty']
kdd = pd.read_csv('nsl-KDDTrain+.txt', names=kdd_cols)
kdd_t = pd.read_csv('nsl-KDDTest+.txt', names=kdd_cols)
kdd.head()
kdd_cols = [kdd.columns[0]] + sorted(list(set(kdd.protocol_type.values))) + sorted(list(set(kdd.service.values))) + sorted(list(set(kdd.flag.values))) + kdd.columns[4:].tolist()
attack_map = [x.strip().split() for x in open('training_attack_types_binary', 'r')]
attack_map = {k:v for (k,v) in attack_map}
attack_map
kdd['class'] = kdd['class'].replace(attack_map)
kdd_t['class'] = kdd_t['class'].replace(attack_map)
##############################################
def cat_encode(df, col):
return pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col].values)], axis=1)
def log_trns(df, col):
return df[col].apply(np.log1p)
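# Example (added for illustration): what the two helpers above do to a toy frame.
# cat_encode(pd.DataFrame({'protocol_type': ['tcp', 'udp', 'tcp']}), 'protocol_type')
# drops 'protocol_type' and appends one-hot columns 'tcp' and 'udp';
# log_trns on a 'duration' column of [0, 9] returns log1p values 0.0 and ~2.303.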
cat_lst = ['protocol_type', 'service', 'flag']
for col in cat_lst:
kdd = cat_encode(kdd, col)
kdd_t = cat_encode(kdd_t, col)
log_lst = ['duration', 'src_bytes', 'dst_bytes']
for col in log_lst:
kdd[col] = log_trns(kdd, col)
kdd_t[col] = log_trns(kdd_t, col)
kdd = kdd[kdd_cols]
for col in kdd_cols:
if col not in kdd_t.columns:
kdd_t[col] = 0
kdd_t = kdd_t[kdd_cols]
kdd.head()
##############################################
difficulty = kdd.pop('difficulty')
target = kdd.pop('class')
y_diff = kdd_t.pop('difficulty')
y_test = kdd_t.pop('class')
target = pd.get_dummies(target)
y_test = pd.get_dummies(y_test)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 10 19:01:55 2019
@author: ashik
"""
import pandas as pd
import time
import os
import glob
import datetime
from datetime import timedelta
import scipy.stats
import math
#articleDF = pd.read_excel("data/ajb9b3.xlsx")
#time.strftime("%A %Y-%m-%d %H:%M:%S", time.localtime(df.iloc[1,6]/1000))
def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,
truncate_sheet=False,
**to_excel_kwargs):
from openpyxl import load_workbook
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer = pd.ExcelWriter(filename, engine='openpyxl')
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
writer.book = load_workbook(filename)
if startrow is None and sheet_name in writer.book.sheetnames:
startrow = writer.book[sheet_name].max_row
if truncate_sheet and sheet_name in writer.book.sheetnames:
idx = writer.book.sheetnames.index(sheet_name)
writer.book.remove(writer.book.worksheets[idx])
writer.book.create_sheet(sheet_name, idx)
writer.sheets = {ws.title:ws for ws in writer.book.worksheets}
except FileNotFoundError:
pass
if startrow is None:
startrow = 0
df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs)
writer.save()
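# Usage sketch (added; file and sheet names are illustrative only):
# append_df_to_excel('weekly_report.xlsx', some_df, sheet_name='Week1', index=False)
# appends below the existing rows of 'Week1', creating the sheet or the file
# when it does not exist yet.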
def calculateWeekData(articleDF, tempIndex):
day = int(time.strftime("%d", time.localtime(articleDF.iloc[tempIndex,6]/1000)))
weekDate = time.strftime('%A', time.localtime(articleDF.iloc[tempIndex,5]/1000))
while(weekDate!='Monday' and tempIndex<len(articleDF)):
weekDate = time.strftime('%A', time.localtime(articleDF.iloc[tempIndex,5]/1000))
tempIndex = tempIndex+1
print(weekDate)
weekDayDF = pd.DataFrame()
'''
if tempIndex<len(articleDF):
day = int(time.strftime("%d", time.localtime(articleDF.iloc[tempIndex,6]/1000)))'''
nextWeek = day+5
nextDay = 'Monday'
#while (day<nextWeek and tempIndex<len(articleDF)):
while (nextDay!='Saturday' and tempIndex<len(articleDF)):
duration = int(articleDF.iloc[tempIndex,9])
if (duration>=10):
weekDay = time.strftime('%A', time.localtime(articleDF.iloc[tempIndex,5]/1000))
startTime = time.strftime("%H:%M:%S", time.localtime(articleDF.iloc[tempIndex,5]/1000))
endTime = time.strftime("%H:%M:%S", time.localtime(articleDF.iloc[tempIndex,6]/1000))
timeRange = startTime+"-"+endTime
dpd = float(articleDF.iloc[tempIndex, 3])/duration
startTime = articleDF.iloc[tempIndex, 5]
endTime = articleDF.iloc[tempIndex, 6]
weekDate = time.strftime("%Y-%m-%d", time.localtime(articleDF.iloc[tempIndex,6]/1000))
weekDayDF = weekDayDF.append({'Day': weekDay, 'Time':timeRange, 'Octets/Duration':dpd, 'Date':weekDate, 'Start time': startTime, 'End time':endTime}, ignore_index = True)
#print(day)
#day = int(time.strftime("%d", time.localtime(articleDF.iloc[tempIndex,6]/1000)))
nextDay = time.strftime("%A", time.localtime(articleDF.iloc[tempIndex,6]/1000))
tempIndex = tempIndex+1
if (len(weekDayDF)):
weekDayDF = weekDayDF.sort_values(by=['Start time'])
weekDayDF = weekDayDF.reset_index(drop=True)
else:
startTime = articleDF.iloc[tempIndex-1,5]/1000
weekDayDF = weekDayDF.append({'Day': 0, 'Time':0, 'Octets/Duration':0, 'Date':0, 'Start time': startTime, 'End time':0}, ignore_index = True)
return weekDayDF, tempIndex
def getWeek(articleDF):
tempIndex = 0
firstWeek = pd.DataFrame()
firstWeek, tempIndex = calculateWeekData(articleDF, tempIndex)
print("first")
secondWeek = pd.DataFrame()
if (tempIndex<len(articleDF)):
print("goto 2nd")
print(tempIndex)
secondWeek, tempIndex = calculateWeekData(articleDF, tempIndex)
else:
startTime = articleDF.iloc[tempIndex-1,5]/1000
secondWeek = secondWeek.append({'Day': 0, 'Time':0, 'Octets/Duration':0, 'Date':0, 'Start time': startTime, 'End time':0}, ignore_index = True)
return firstWeek, secondWeek
def epochTimeCreate(weekDataDF):
epochListDF = pd.DataFrame()
firstDay = weekDataDF.iloc[0,4]
day = int(time.strftime("%d", time.localtime(firstDay/1000)))
month = int(time.strftime("%m", time.localtime(firstDay/1000)))
year = int(time.strftime("%Y", time.localtime(firstDay/1000)))
timeFormat = "%Y-%m-%d %H:%M:%S"
t1 = datetime.datetime(year, month, day, 8, 00, 00)
t2 = datetime.datetime(year, month, day, 17, 00, 00)
for i in range (0,5):
startTime = t1 + timedelta(days=i)
endTime = t2 + timedelta(days=i)
epochInitial = int(time.mktime(time.strptime(str(startTime), timeFormat)))
epochFinal = int(time.mktime(time.strptime(str(endTime), timeFormat)))
while epochInitial<=epochFinal:
curTime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(epochInitial))
epochListDF = epochListDF.append({'Epoch list':str(epochInitial), 'Real time':curTime}, ignore_index=True)
epochInitial = epochInitial+10
return epochListDF
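# Note (added for clarity): the loop above builds a 10-second grid between
# 08:00:00 and 17:00:00 for five consecutive days starting at the week's first
# date, i.e. (9 * 360 + 1) = 3241 timestamps per day.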
def splitData(epochListDF, weekDataDF, delN):
index=0
i=0
finalSplitList = pd.DataFrame()
sameIndex=0
flag=1
while(i<len(epochListDF) and index<len(weekDataDF)):
epochtime = int(epochListDF['Epoch list'][i])
epochtime2 = int((weekDataDF['Start time'][index])/1000)
startTime = time.strftime("%A -- %H:%M:%S --", time.localtime(epochtime))
endtime = epochtime+delN
endTime = time.strftime("%H:%M:%S", (time.localtime(endtime)))
timeRange = startTime + endTime
if ((epochtime2>=epochtime and epochtime2<endtime) and (len(finalSplitList)!=0)):
opd = weekDataDF['Octets/Duration'][index]
if(timeRange == finalSplitList['Time'][len(finalSplitList)-1]):
prev = finalSplitList['Octets/Duration'][len(finalSplitList)-1]
opdFinal = (prev*sameIndex+opd)/(sameIndex+1)
finalSplitList.at[(len(finalSplitList)-1),'Octets/Duration']=opdFinal
sameIndex=sameIndex+1
flag=0
else:
finalSplitList = finalSplitList.append({'Time':timeRange, 'Octets/Duration':opd}, ignore_index=True)
flag=0
sameIndex=1
index=index+1
else:
if(epochtime2<epochtime):
index=index+1
else:
if(flag):
finalSplitList = finalSplitList.append({'Time':timeRange, 'Octets/Duration':0}, ignore_index=True)
i=i+1
flag=1
i=i+1
if (i<len(epochListDF)):
while (i<len(epochListDF)):
epochtime = int(epochListDF['Epoch list'][i])
startTime = time.strftime("%A -- %H:%M:%S --", time.localtime(epochtime))
endtime = epochtime+10
endTime = time.strftime("%H:%M:%S", (time.localtime(endtime)))
timeRange = startTime + endTime
finalSplitList = finalSplitList.append({'Time':timeRange, 'Octets/Duration':0}, ignore_index=True)
i=i+1
return finalSplitList
def z_val(r_1a2a, r_1a2b, r_2a2b, N):
rm2 = ((r_1a2a ** 2) + (r_1a2b ** 2)) / 2
f = (1 - r_2a2b) / (2 * (1 - rm2))
h = (1 - f * rm2) / (1 - rm2)
z_1a2a = 0.5 * (math.log10((1 + r_1a2a)/(1 - r_1a2a)))
z_1a2b = 0.5 * (math.log10((1 + r_1a2b)/(1 - r_1a2b)))
z = (z_1a2a - z_1a2b) * ((N-3) ** 0.5) / (2 * (1 - r_2a2b) * h)
return z
def p_val(z):
p = 0.3275911
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
sign = None
if z < 0.01:
sign = -1
else:
sign = 1
x = abs(z) / (2 ** 0.5)
t = 1 / (1 + p * x)
erf = 1.0 - (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * math.exp(-x * x)
return 0.5 * (1 + sign * erf)
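# Example (added for illustration): z_val compares two dependent correlations
# (a Steiger-style test) and p_val approximates the standard normal CDF of the
# resulting statistic with an erf series, e.g.
def _demo_z_p():
    z = z_val(0.8, 0.6, 0.5, 100)  # r_1a2a, r_1a2b, r_2a2b, sample size N
    return z, p_val(z)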
def week_gen(files, delN):
flags = []
for i in range (0, len(files)):
flags.append(0)
    firstweek = pd.DataFrame()
import os
import operator
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def collapse_phn(char):
collapse_dict = {"b":"b", "bcl":"h#", "d":"d", "dcl":"h#", "g":"g", "gcl":"h#", "p":"p", "pcl":"h#", "t":"t", "tcl":"h#", "k":"k", "kcl":"h#", "dx":"dx", "q":"q", "jh":"jh", "ch":"ch", "s":"s", "sh":"sh", "z":"z", "zh":"sh",
"f":"f", "th":"th", "v":"v", "dh":"dh", "m":"m", "n":"n", "ng":"ng", "em":"m", "en":"n", "eng":"ng", "nx":"n", "l":"l", "r":"r", "w":"w", "y":"y",
"hh":"hh", "hv":"hh", "el":"l", "iy":"iy", "ih":"ih", "eh":"eh", "ey":"ey", "ae":"ae", "aa":"aa", "aw":"aw", "ay":"ay", "ah":"ah", "ao":"aa", "oy":"oy",
"ow":"ow", "uh":"uh", "uw":"uw", "ux":"uw", "er":"er", "ax":"ah", "ix":"ih", "axr":"er", "ax-h":"ah", "pau":"h#", "epi":"h#", "h#": "h#"}
return collapse_dict[char]
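# Example (added for illustration): the mapping above folds the full TIMIT phone
# set into a reduced set, e.g. closures collapse into silence and allophones
# collapse into their base phones.
def _demo_collapse_phn():
    assert collapse_phn('bcl') == 'h#'  # closure -> silence
    assert collapse_phn('ux') == 'uw'   # allophone -> base vowel
    assert collapse_phn('zh') == 'sh'   # merged fricative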
# generates number of phonemes in complete training set
def count_train_phns():
source_path = os.path.join('TIMIT', 'TRAIN')
phn_count = {}
curr_phn = 1
# extracts all instances of non-silence phonemes
for dir_name, subdir_list, file_list in os.walk(source_path):
for file in file_list:
if file.endswith('.PHN'):
# load phoneme description
lines = []
with open(os.path.join(dir_name, file)) as f:
[lines.append(line.rstrip().split(' ')) for line in f.readlines()]
for line in lines:
col_phone = collapse_phn(line[2])
# ignore silence
if(col_phone == 'h#'):
continue
# record phoneme count
if col_phone not in phn_count:
phn_count[col_phone] = 1
else:
phn_count[col_phone] += 1
print('Extracted phoneme {} out of {}'.format(curr_phn, 141203), end='\r')
curr_phn += 1
sorted_phn = sorted(phn_count.items(), key=operator.itemgetter(1), reverse=True)
with open('data/train_phn_occurrence.txt', 'w+') as f:
[f.write(phn[0] + ' ' + str(phn[1]) + '\n') for phn in sorted_phn]
# calculates phoneme accuracy of model during stratification
def strat_accuracy_pct():
strat_phn_occurrence = {}
phn_occurrence = {}
with open('config/phn_occurrence.txt') as f:
for line in f.readlines():
phn_occurrence[line.split()[0]] = int(line.split()[1])
with open('config/strat_phn_occurrence.txt') as f:
for line in f.readlines():
strat_phn_occurrence[line.split()[0]] = int(line.split()[1])
acc = {}
for phn, num in phn_occurrence.items():
if phn not in strat_phn_occurrence:
acc[phn] = 0
else:
acc[phn] = strat_phn_occurrence[phn] / phn_occurrence[phn]
sorted_acc = sorted(acc.items(), key=operator.itemgetter(1), reverse=True)
print(sorted_acc)
with open('data/accuracy.txt', 'w+') as f:
[f.write(tuple[0] + ' ' + str(tuple[1]) + '\n') for tuple in sorted_acc]
# calculates average predicted phoneme sequence length
def calc_avg_len():
length = 0
with open('data/original1/output_len.txt') as f:
for line in f.readlines():
length += float(line.split(' ')[1])
# with open('data/stratified2/output_len.txt') as f:
# for line in f.readlines():
# length += float(line.split(' ')[1])
print(length / 55)
# averages data together
def average_data():
df = pd.DataFrame()
    df = df.append(pd.read_csv('data/original1/original1.csv', index_col=0))
"""Kodoja pipeline."""
from __future__ import print_function
import subprocess
import pandas as pd
import random
import os
import pickle
from math import isnan
from Bio import SeqIO
from Bio.SeqIO.FastaIO import SimpleFastaParser
from Bio.SeqIO.QualityIO import FastqGeneralIterator
# The user-facing scripts will all report this version number via --version:
version = "0.0.10"
def check_path(dirs):
"""Check if directory path has '/' at the end.
Return value is either '/' or empty string ''.
"""
if dirs[-1] != "/":
return "/"
else:
return ""
def test_format(file1, user_format):
"""Check data format.
Check if data is in the fasta or fastq format and
assert the user has specified the correct format for
the data provided.
Return an assert stament and stop or continue.
"""
with open(file1) as myfile:
# Would have used xrange under Python 2, but want this to work
# on both Python 2 and 3 and a list of 8 elements is tiny.
small_file = [next(myfile) for x in range(8)]
file_format = "not identified"
if small_file[0][0] == "@" and small_file[4][0] == "@":
file_format = "fastq"
if small_file[0][0] == ">":
file_format = "fasta"
assert (file_format == "fasta") | (file_format == "fastq"), \
"Cannot proceed with file as it is not in fasta or fastq format."
assert user_format == file_format, \
"File has been detected to be in " + file_format + \
" format rather than " + user_format + " format."
def rename_seqIDs(input_file, out_dir, user_format, paired=False):
"""Rename sequence identifiers to just the read number.
Write a new file where each sequence ID is replaced with
the read number (counting from one).
Does not attempt to include "/1" and "/2" name suffixes, nor
include "1:" or "2:" in the description, for paired reads.
Returns dictionary mapping the sequence number to the old
identifier (first word only from the description line,
and if paired without any "/1" or "/2" suffix).
"""
if paired == 2:
output_file = os.path.join(out_dir, "renamed_file_2." + user_format)
elif paired == 1 or paired is False:
output_file = os.path.join(out_dir, "renamed_file_1." + user_format)
else:
raise ValueError("Wanted 1, 2 or False - not %r" % paired)
id_dict = {}
with open(input_file, 'r') as in_file, open(output_file, 'w') as out_file:
if user_format == 'fasta':
for index, (title, seq) in enumerate(SimpleFastaParser(in_file)):
name = title.split(None, 1)[0]
if (paired == 1 and name.endswith("/1")) or (paired == 2 and name.endswith("/2")):
name = name[:-2]
id_dict[index + 1] = name
out_file.write(">%i\n%s\n" % (index + 1, seq))
else:
for index, (title, seq, qual) in enumerate(FastqGeneralIterator(in_file)):
name = title.split(None, 1)[0]
if (paired == 1 and name.endswith("/1")) or (paired == 2 and name.endswith("/2")):
name = name[:-2]
id_dict[index + 1] = name
out_file.write("@%i\n%s\n+\n%s\n" % (index + 1, seq, qual))
return id_dict
def check_file(file1, out_dir, user_format, file2=False):
"""Rename sequnce ids and check PE files.
Rename sequnce ids for SE or PE files to ensure
consistency between kraken and kaiju (which modify
id names). Create dictionaries containing real IDs and
renamed version and pickle. If data is PE, assert
paired files have the same number of entries and if
the paired reads are matched by choosing random
entries and confirming the IDs match (optionally
with /1 and /2 suffixes).
"""
if file2:
ids1 = rename_seqIDs(file1, out_dir, user_format, paired=1)
ids2 = rename_seqIDs(file2, out_dir, user_format, paired=2)
with open(os.path.join(out_dir, 'ids2.pkl'), 'wb') as pkl_dict:
pickle.dump(ids2, pkl_dict, protocol=pickle.HIGHEST_PROTOCOL)
assert len(ids1) == len(ids2), \
"Paired files have different number of reads"
for values in range(1, 50):
random_id = random.randint(1, len(ids1) - 1)
id_1 = ids1[random_id]
id_2 = ids2[random_id]
assert id_1 == id_2, \
("Paired-end sequences don't match, e.g. %r vs %r"
% (id_1, id_2))
else:
ids1 = rename_seqIDs(file1, out_dir, user_format, paired=False)
with open(os.path.join(out_dir, "log_file.txt"), "a") as log_file:
log_file.write("Number of sequences = " + str(list(ids1)[-1]) + "\n")
with open(os.path.join(out_dir, 'ids1.pkl'), 'wb') as pkl_dict:
pickle.dump(ids1, pkl_dict, protocol=pickle.HIGHEST_PROTOCOL)
def fastqc_trim(out_dir, file1, trim_minlen, threads, adapter_file, file2=False):
"""Quality and adaptor trimming of fastq files.
Takes fastq data (either single or paired), trims sequences using trimmomatic
(in the case of paried end reads, it deletes extra files) and uses fastqc to
show the user what the sequence quality looks like after trimming.
Returns trimmed sequence files and fastq analysis files
"""
trimAdapt_command = " LEADING:20 TRAILING:20 MINLEN:" + \
str(trim_minlen)
if adapter_file:
trimAdapt_command += " ILLUMINACLIP:" + adapter_file + ":2:30:10"
if file2:
PE_trim_command = "trimmomatic PE -threads " + str(threads) + " " + file1 + " " + file2 + \
" " + os.path.join(out_dir, "trimmed_read1") + \
" " + os.path.join(out_dir, "PE_trimmed_data_1U") + \
" " + os.path.join(out_dir, "trimmed_read2") + \
" " + os.path.join(out_dir, "PE_trimmed_data_2U") + trimAdapt_command
subprocess.check_call(PE_trim_command, shell=True)
os.remove(os.path.join(out_dir, "PE_trimmed_data_1U"))
os.remove(os.path.join(out_dir, "PE_trimmed_data_2U"))
subprocess.check_call("fastqc " + os.path.join(out_dir, "trimmed_read1") +
" -o " + out_dir, shell=True)
subprocess.check_call("fastqc " + os.path.join(out_dir, "trimmed_read2") +
" -o " + out_dir, shell=True)
else:
subprocess.check_call("trimmomatic SE -threads " + str(threads) + " " + file1 +
" " + os.path.join(out_dir, "trimmed_read1") +
" " + trimAdapt_command, shell=True)
subprocess.check_call("fastqc " + os.path.join(out_dir, "trimmed_read1") +
" -o " + out_dir, shell=True)
def kraken_classify(out_dir, kraken_file1, threads, user_format, kraken_db, kraken_file2=False,
quick_minhits=False, preload=False):
"""Kraken classification.
    Add appropriate switches for the kraken command (format, preload, minimum hits,
if paired or single end) and call
kraken command, followed by kraken-translate to get full taxonomy for each
    sequence based on their sequence id (Seq_tax: d__superkingdom, k__kingdom,
p__phylum, c__class, o__order, f__family, g__genus, s__species).
Return kraken_table file with a row for each sequence and kraken classification
    (or unclassified) and kraken_labels file with a row for each sequence that was
classified by kraken with full taxonomy.
"""
if user_format == "fastq":
format_switch = " --fastq-input"
elif user_format == "fasta":
format_switch = " --fasta-input"
if preload:
kraken_command = "kraken --preload "
else:
kraken_command = "kraken "
kraken_command += "--threads " + str(threads) + " --db " + kraken_db + format_switch
if quick_minhits:
kraken_command += " --quick --min-hits " + str(quick_minhits)
if kraken_file2:
kraken_command += " --paired " + kraken_file1 + " " + \
kraken_file2 + " > " + os.path.join(out_dir, "kraken_table.txt")
else:
kraken_command += " " + kraken_file1 + " > " + os.path.join(out_dir, "kraken_table.txt")
subprocess.check_call(kraken_command, shell=True)
subprocess.check_call("kraken-translate --mpa-format --db " + kraken_db +
" " + os.path.join(out_dir, "kraken_table.txt") + " > " +
os.path.join(out_dir, "kraken_labels.txt"), shell=True)
def format_result_table(out_dir, data_table, data_labels, table_colNames):
"""Merge classification and label data.
Merge the classification data (either kraken or kaiju) with the 'label'
data which has full taxonomy for the classified sequence.
Return merged table
"""
label_colNames = ["Seq_ID", "Seq_tax"]
seq_data = pd.read_csv(os.path.join(out_dir, data_table),
sep="\t", header=None, names=table_colNames,
index_col=False)
seq_labelData = pd.read_csv(os.path.join(out_dir, data_labels),
sep="\t", header=None,
names=label_colNames)
    seq_result = pd.merge(seq_data, seq_labelData, on='Seq_ID', how='outer')
    return seq_result
import numpy as np
import pandas as pd
import pytest
from hypothesis import given, settings
from pandas.testing import assert_frame_equal
from janitor.testing_utils.strategies import (
conditional_df,
conditional_right,
conditional_series,
)
@pytest.mark.xfail(reason="empty object will pass thru")
@given(s=conditional_series())
def test_df_empty(s):
"""Raise ValueError if `df` is empty."""
df = pd.DataFrame([], dtype="int", columns=["A"])
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@pytest.mark.xfail(reason="empty object will pass thru")
@given(df=conditional_df())
def test_right_empty(df):
"""Raise ValueError if `right` is empty."""
s = pd.Series([], dtype="int", name="A")
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_right_df(df):
"""Raise TypeError if `right` is not a Series/DataFrame."""
with pytest.raises(TypeError):
df.conditional_join({"non": [2, 3, 4]}, ("A", "non", "=="))
@given(df=conditional_df(), s=conditional_series())
def test_right_series(df, s):
"""Raise ValueError if `right` is not a named Series."""
with pytest.raises(ValueError):
df.conditional_join(s, ("A", "non", "=="))
@given(df=conditional_df())
def test_df_MultiIndex(df):
"""Raise ValueError if `df` columns is a MultiIndex."""
with pytest.raises(ValueError):
df.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(
pd.Series([2, 3, 4], name="A"), (("A", "F"), "non", "==")
)
@given(df=conditional_df())
def test_right_MultiIndex(df):
"""Raise ValueError if `right` columns is a MultiIndex."""
with pytest.raises(ValueError):
right = df.copy()
right.columns = [list("ABCDE"), list("FGHIJ")]
df.conditional_join(right, (("A", "F"), "non", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_conditions_exist(df, s):
"""Raise ValueError if no condition is provided."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s)
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_type(df, s):
"""Raise TypeError if any condition in conditions is not a tuple."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("A", "B", ""), ["A", "B"])
@given(df=conditional_df(), s=conditional_series())
def test_check_condition_length(df, s):
"""Raise ValueError if any condition is not length 3."""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("A", "B", "C", "<"))
df.conditional_join(s, ("A", "B", ""), ("A", "B"))
@given(df=conditional_df(), s=conditional_series())
def test_check_left_on_type(df, s):
"""Raise TypeError if left_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, (1, "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_right_on_type(df, s):
"""Raise TypeError if right_on is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", 1, "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_type(df, s):
"""Raise TypeError if the operator is not a string."""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", 1))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_df(df, s):
"""
Raise ValueError if `left_on`
can not be found in `df`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("C", "B", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_column_exists_right(df, s):
"""
Raise ValueError if `right_on`
can not be found in `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "A", ">="))
@given(df=conditional_df(), s=conditional_series())
def test_check_op_correct(df, s):
"""
Raise ValueError if `op` is not any of
`!=`, `<`, `>`, `>=`, `<=`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "=!"))
@given(df=conditional_df(), s=conditional_series())
def test_check_how_type(df, s):
"""
Raise TypeError if `how` is not a string.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how=1)
@given(df=conditional_df(), s=conditional_series())
def test_check_how_value(df, s):
"""
Raise ValueError if `how` is not one of
`inner`, `left`, or `right`.
"""
with pytest.raises(ValueError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), how="INNER")
@given(df=conditional_df(), right=conditional_right())
def test_dtype_strings_non_equi(df, right):
"""
Raise ValueError if the dtypes are both strings
on a non-equi operator.
"""
with pytest.raises(ValueError):
df.conditional_join(right, ("C", "Strings", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_not_permitted(df, s):
"""
Raise ValueError if dtype of column in `df`
is not an acceptable type.
"""
df["F"] = pd.Timedelta("1 days")
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("F", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_str(df, s):
"""
Raise ValueError if dtype of column in `df`
does not match the dtype of column from `right`.
"""
with pytest.raises(ValueError):
s.name = "A"
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_dtype_category_non_equi(df, s):
"""
Raise ValueError if dtype is category,
and op is non-equi.
"""
with pytest.raises(ValueError):
s.name = "A"
s = s.astype("category")
df["C"] = df["C"].astype("category")
df.conditional_join(s, ("C", "A", "<"))
@given(df=conditional_df(), s=conditional_series())
def test_check_sort_by_appearance_type(df, s):
"""
Raise TypeError if `sort_by_appearance` is not a boolean.
"""
with pytest.raises(TypeError):
s.name = "B"
df.conditional_join(s, ("B", "B", "<"), sort_by_appearance="True")
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_floats(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints(df, right):
"""Test output for a single condition. "<"."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="2"), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_ints_extension_array(df, right):
"""Test output for a single condition. "<"."""
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_equal(df, right):
"""Test output for a single condition. "<=". DateTimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_less_than_date(df, right):
"""Test output for a single condition. "<". Dates"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "<"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_datetime(df, right):
"""Test output for a single condition. ">". Datetimes"""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} >= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_floats_floats(df, right):
"""Test output for a single condition. ">"."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_greater_than_ints_extension_array(df, right):
"""Test output for a single condition. ">="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} > {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_numeric(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_ints_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["A", "Integers"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_floats_only(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["B", "Numeric"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_not_equal_datetime(df, right):
"""Test output for a single condition. "!="."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.dropna(subset=["E", "Dates"])
.query(f"{left_on} != {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "!="), how="inner", sort_by_appearance=True
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_string(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@pytest.mark.xfail(
reason="""sometimes, categories are coerced to objects;
might be a pandas version issue.
"""
)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_category(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["C", "Strings"]
df = df.assign(C=df["C"].astype("category"))
right = right.assign(Strings=right["Strings"].astype("category"))
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_numeric(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["A", "Integers"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
df.loc[0, "A"] = pd.NA
right.loc[0, "Integers"] = pd.NA
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_single_condition_equality_datetime(df, right):
"""Test output for a single condition. "=="."""
left_on, right_on = ["E", "Dates"]
expected = df.dropna(subset=[left_on]).merge(
right.dropna(subset=[right_on]), left_on=left_on, right_on=right_on
)
expected = expected.reset_index(drop=True)
expected = expected.filter([left_on, right_on])
actual = df.conditional_join(
right, (left_on, right_on, "=="), how="inner", sort_by_appearance=False
)
actual = actual.filter([left_on, right_on])
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_left(df, right):
"""Test output when `how==left`. "<="."""
left_on, right_on = ["A", "Integers"]
expected = (
df.assign(t=1, index=np.arange(len(df)))
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = df.join(
expected.filter(right.columns), how="left", sort=False
).reset_index(drop=True)
actual = df.conditional_join(
right, (left_on, right_on, "<="), how="left", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@given(df=conditional_df(), right=conditional_right())
def test_how_right(df, right):
"""Test output when `how==right`. ">"."""
left_on, right_on = ["E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, index=np.arange(len(right))), on="t")
.query(f"{left_on} > {right_on}")
)
expected = expected.set_index("index")
expected.index.name = None
expected = (
expected.filter(df.columns)
.join(right, how="right", sort=False)
.reset_index(drop=True)
)
actual = df.conditional_join(
right, (left_on, right_on, ">"), how="right", sort_by_appearance=True
)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_dates(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("E", "Dates", "Dates_Right")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ge_and_le_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">="),
(middle, right_on, "<="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_le_and_ge_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} <= {middle} <= {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, right_on, "<="),
(middle, left_on, ">="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers(df, right):
"""Test output for interval conditions."""
middle, left_on, right_on = ("B", "Numeric", "Floats")
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query(f"{left_on} < {middle} < {right_on}")
.reset_index(drop=True)
)
expected = expected.filter([left_on, middle, right_on])
actual = df.conditional_join(
right,
(middle, left_on, ">"),
(middle, right_on, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([left_on, middle, right_on])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers_(df, right):
"""
Test output for multiple conditions.
"""
first, second, third = ("Numeric", "Floats", "B")
expected = (
right.assign(t=1)
.merge(df.assign(t=1), on="t")
.query(f"{first} > {third} and {second} < {third}")
.reset_index(drop=True)
)
expected = expected.filter([first, second, third])
actual = right.conditional_join(
df,
(first, third, ">"),
(second, third, "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter([first, second, third])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers_left_join(df, right):
"""
Test output for multiple conditions, and how is `left`.
"""
first, second, third = ("Numeric", "Floats", "B")
right = right.assign(t=1, check=range(len(right)))
df = df.assign(t=1)
expected = right.merge(df, on="t").query(
f"{first} > {third} and {second} < {third}"
)
drop = right.columns.difference(["check"])
expected = right.merge(
expected.drop(columns=[*drop]), on="check", how="left", sort=False
)
expected = expected.filter([first, second, third])
actual = right.conditional_join(
df,
(first, third, ">"),
(second, third, "<"),
how="left",
sort_by_appearance=True,
)
actual = actual.droplevel(0, 1)
actual = actual.loc[:, [first, second, third]]
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_gt_and_lt_numbers_right_join(df, right):
"""
Test output for multiple conditions, and how is `right`.
"""
first, second, third = ("Numeric", "Floats", "B")
df = df.assign(t=1, check=range(len(df)))
right = right.assign(t=1)
expected = right.merge(df, on="t").query(
f"{first} > {third} and {second} < {third}"
)
drop = df.columns.difference(["check"])
expected = expected.drop(columns=[*drop]).merge(
df, on="check", how="right", sort=False
)
expected = expected.filter([first, second, third])
actual = right.conditional_join(
df,
(first, third, ">"),
(second, third, "<"),
how="right",
sort_by_appearance=True,
)
actual = actual.droplevel(0, 1)
actual = actual.loc[:, [first, second, third]]
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "B", "Numeric"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne_extension(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "B", "Numeric"]
df = df.assign(A=df["A"].astype("Int64"))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne_extension_right(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "B", "Numeric"]
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_ne_dates(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("E", "Dates", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_multiple_ne_dates(df, right):
"""
Test output for multiple conditions. `!=`
"""
filters = ["A", "Integers", "E", "Dates", "B", "Numeric"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and E != Dates and B != Numeric")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", "!="),
("E", "Dates", "!="),
("B", "Numeric", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_eq_and_ne(df, right):
"""Test output for equal and not equal conditions."""
A, B, C, D = ("B", "Numeric", "E", "Dates")
expected = (
df.merge(right, left_on=A, right_on=B)
.dropna(subset=[A, B])
.query(f"{C} != {D}")
.reset_index(drop=True)
)
expected = expected.filter([A, B, C, D])
actual = df.conditional_join(
right,
(A, B, "=="),
(C, D, "!="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter([A, B, C, D])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_dual_conditions_ne_and_eq(df, right):
"""Test output for equal and not equal conditions."""
A, B, C, D = ("A", "Integers", "E", "Dates")
expected = (
df.merge(right, left_on=C, right_on=D)
.dropna(subset=[C, D])
.query(f"{A} != {B}")
.reset_index(drop=True)
)
expected = expected.filter([A, B, C, D])
actual = df.conditional_join(
right,
(A, B, "!="),
(C, D, "=="),
how="inner",
sort_by_appearance=False,
)
actual = actual.filter([A, B, C, D])
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_gt_lt_ne_conditions(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A > Integers and B < Numeric and E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("A", "Integers", ">"),
("B", "Numeric", "<"),
("E", "Dates", "!="),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_gt_lt_ne_start(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
expected = (
df.assign(t=1)
.merge(right.assign(t=1, C="C"), on="t")
.query("A > Integers and B < Numeric and E != Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("E", "Dates", "!="),
("A", "Integers", ">"),
("B", "Numeric", "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_le_ne_extension_array(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype(pd.Int64Dtype()))
expected = (
df.assign(t=1)
.merge(right.assign(t=1), on="t")
.query("A != Integers and B < Numeric and E >= Dates")
.reset_index(drop=True)
)
expected = expected.filter(filters)
actual = df.conditional_join(
right,
("E", "Dates", ">="),
("A", "Integers", "!="),
("B", "Numeric", "<"),
how="inner",
sort_by_appearance=True,
)
actual = actual.filter(filters)
assert_frame_equal(expected, actual)
@settings(deadline=None)
@given(df=conditional_df(), right=conditional_right())
def test_ge_lt_ne_extension(df, right):
"""
Test output for multiple conditions.
"""
filters = ["A", "Integers", "B", "Numeric", "E", "Dates"]
df = df.assign(A=df["A"].astype("Int64"))
right = right.assign(Integers=right["Integers"].astype( | pd.Int64Dtype() | pandas.Int64Dtype |
import os
import sys
sys.path.insert(0, '.')  # make runnable from src/
# external libraries
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# For path referencing
from config.definitions import ROOT_DIR
# Python's built in libs
from collections import Counter
# Global constants
features_m = {
'RI': 'refractive_index',
'Na': 'sodium',
'Mg': 'magnesium',
'Al': 'aluminium',
    'Si': 'silicon',
'K': 'potassium',
'Ca': 'calcium',
'Ba': 'barium',
'Fe': 'iron'
}
features_names = ['refractive_index', 'sodium', 'magnesium', 'aluminium', 'silicon', 'potassium', 'calcium', 'barium', 'iron']
classes_m = {
1: 'window_from_building_(float_processed)',
2: 'window_from_building_(non_float_processed)',
3: 'window_from_vehicle',
5: 'container',
6: 'tableware',
7: 'headlamp'
}
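# Illustrative only (an assumption about how these maps are meant to be used;
# the raw CSVs below are loaded with their original headers):
#   raw_df = raw_df.rename(columns=features_m)
#   raw_df["type_name"] = raw_df["type"].map(classes_m)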
def run_preprocessing():
# Save the info about the process into a specified file
old = sys.stdout
out_path = os.path.join(ROOT_DIR, 'data', 'metadata', 'inspect_clean_transform_info.txt')
sys.stdout = open(out_path, 'w')
# Load data
train = pd.read_csv(os.path.join(ROOT_DIR, 'data', 'raw', 'df_train.csv'), delimiter=',', header=0)
test = pd.read_csv(os.path.join(ROOT_DIR, 'data', 'raw', 'df_test.csv'), delimiter=',', header=0)
# Initial inspection
print('-- Initial inspection ', end='-'*50 + '\n')
print('Training data')
print(train.head())
print(end='\n\n')
print('Test data')
print(test.head())
print(end='\n\n')
print(f'Training data shape: {train.shape} | Test data shape: {test.shape}')
print(f'There is in total {len(np.unique(train["type"]))} classes labeled as: {np.unique(train["type"])}')
print(end='\n\n')
# Split the data
x_train, x_val, y_train, y_val = train_test_split(train.iloc[:, :-1],
train.iloc[:, -1],
test_size=0.33,
random_state=42)
x_test, y_test = test.iloc[:, :-1], test.iloc[:, -1]
# Define transformations method
scaler = StandardScaler(with_mean=True, with_std=True) # Mean zero, unit variance
pca = PCA(random_state=42)
# Transform
print('-- Label distribution ', end='-'*50 + '\n')
print('\nMap from key to actual name:')
print('-'*40)
for k, v in classes_m.items():
print(f'{k} --> {v}')
print('-'*40)
data = [[x_train, y_train, 'train'], [x_val, y_val, 'val'], [x_test, y_test, 'test']]
expl_var = dict()
for t in data:
# Load and transform
X, y, path = t
X_scaled = scaler.fit_transform(X)
X_pca = pca.fit_transform(X_scaled)
expl_var[path] = pca.explained_variance_ratio_
# Save
X.to_csv(os.path.join(ROOT_DIR, 'data', 'transformed', path, 'X_org.csv'), index=False)
pd.DataFrame(X_scaled).to_csv(os.path.join(ROOT_DIR, 'data', 'transformed', path, 'X_scaled.csv'), index=False, header=False)
| pd.DataFrame(X_pca) | pandas.DataFrame |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
--inFile: Path for the configuration file where the time series data values CSV
--outFile: Path for the configuration file where the time series data values INI
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
self.fileLocation = os.path.exists(os.path.abspath(inputFileName))
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # 2020-06-09 19:14:00.000
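    # Illustrative sketch of the parser returned above (not exercised elsewhere
    # in this module):
    #   parse = TransformMetaData.getDateParser()
    #   parse("2020-06-09 19:14:00.000")  # -> datetime.datetime(2020, 6, 9, 19, 14)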
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
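    # Illustrative call (sketch; "header.csv" is a hypothetical file name):
    #   fieldnames, fieldDict = self.getHeaderFromFile(headerFilePath="header.csv", method=3)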
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
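            # After the loop, every DataFrame column is one option in the INI
            # section; the written file looks roughly like this (illustrative;
            # note that ConfigParser lower-cases option names by default):
            #   [SectionName]
            #   columnname = ['value1', 'value2', ...]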
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
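    # Worked example for the mean above: for x = numpy.array([1.0, 2.0, 3.0]) the
    # weights are [1/3, 1/3, 1/3], so the weighted average is (1 + 2 + 3) / 3 = 2.0.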
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
            sd = numpy.float128(0.0)  # start the accumulator of squared deviations at zero
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
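    # Worked example for the standard deviation above (accumulator starting at
    # zero): for data = [1.0, 2.0, 3.0], mean = 2.0, the squared deviations sum
    # to 1 + 0 + 1 = 2, and sqrt(2 / (3 - 1)) = 1.0.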
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
meanValue = numpy.float128(1)
sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(float(sigmaValue)):
sigmaValue = numpy.float128(1)
multiplier = numpy.float128(multiplierSigma) # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7
sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(float(sigmaRangeValue)):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
replacementList = [pandas.NaT, numpy.Infinity, numpy.NINF, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": pandas.StringDtype(),
"BitErrorsNonHost1": pandas.StringDtype(),
"BitErrorsNonHost2": pandas.StringDtype(),
"BitErrorsNonHost3": pandas.StringDtype(),
"BitErrorsNonHost4": pandas.StringDtype(),
"BitErrorsNonHost5": pandas.StringDtype(),
"BitErrorsNonHost6": pandas.StringDtype(),
"BitErrorsNonHost7": pandas.StringDtype(),
"BitErrorsNonHost8": pandas.StringDtype(),
"BitErrorsNonHost9": pandas.StringDtype(),
"BitErrorsNonHost10": pandas.StringDtype(),
"BitErrorsNonHost11": pandas.StringDtype(),
"BitErrorsNonHost12": pandas.StringDtype(),
"BitErrorsNonHost13": pandas.StringDtype(),
"BitErrorsNonHost14": pandas.StringDtype(),
"BitErrorsNonHost15": pandas.StringDtype(),
"ECCFailNonHost": pandas.StringDtype(),
"NSversion": pandas.StringDtype(),
"numBands": pandas.StringDtype(),
"minErase": pandas.StringDtype(),
"maxErase": pandas.StringDtype(),
"avgErase": pandas.StringDtype(),
"minMVolt": pandas.StringDtype(),
"maxMVolt": pandas.StringDtype(),
"avgMVolt": pandas.StringDtype(),
"minMAmp": pandas.StringDtype(),
"maxMAmp": pandas.StringDtype(),
"avgMAmp": pandas.StringDtype(),
"comment1": pandas.StringDtype(), # @todo force rename
"minMVolt12v": pandas.StringDtype(),
"maxMVolt12v": pandas.StringDtype(),
"avgMVolt12v": pandas.StringDtype(),
"minMAmp12v": pandas.StringDtype(),
"maxMAmp12v": pandas.StringDtype(),
"avgMAmp12v": pandas.StringDtype(),
"nearMissSector": pandas.StringDtype(),
"nearMissDefect": pandas.StringDtype(),
"nearMissOverflow": pandas.StringDtype(),
"replayUNC": pandas.StringDtype(),
"Drive_Id": pandas.StringDtype(),
"indirectionMisses": pandas.StringDtype(),
"BitErrorsHost16": pandas.StringDtype(),
"BitErrorsHost17": pandas.StringDtype(),
"BitErrorsHost18": pandas.StringDtype(),
"BitErrorsHost19": pandas.StringDtype(),
"BitErrorsHost20": pandas.StringDtype(),
"BitErrorsHost21": pandas.StringDtype(),
"BitErrorsHost22": pandas.StringDtype(),
"BitErrorsHost23": pandas.StringDtype(),
"BitErrorsHost24": pandas.StringDtype(),
"BitErrorsHost25": pandas.StringDtype(),
"BitErrorsHost26": pandas.StringDtype(),
"BitErrorsHost27": pandas.StringDtype(),
"BitErrorsHost28": pandas.StringDtype(),
"BitErrorsHost29": pandas.StringDtype(),
"BitErrorsHost30": pandas.StringDtype(),
"BitErrorsHost31": pandas.StringDtype(),
"BitErrorsHost32": pandas.StringDtype(),
"BitErrorsHost33": pandas.StringDtype(),
"BitErrorsHost34": pandas.StringDtype(),
"BitErrorsHost35": pandas.StringDtype(),
"BitErrorsHost36": pandas.StringDtype(),
"BitErrorsHost37": pandas.StringDtype(),
"BitErrorsHost38": pandas.StringDtype(),
"BitErrorsHost39": pandas.StringDtype(),
"BitErrorsHost40": pandas.StringDtype(),
"XORRebuildSuccess": pandas.StringDtype(),
"XORRebuildFail": pandas.StringDtype(),
"BandReloForError": pandas.StringDtype(),
"mrrSuccess": pandas.StringDtype(),
"mrrFail": pandas.StringDtype(),
"mrrNudgeSuccess": pandas.StringDtype(),
"mrrNudgeHarmless": pandas.StringDtype(),
"mrrNudgeFail": pandas.StringDtype(),
"totalErases": pandas.StringDtype(),
"dieOfflineCount": pandas.StringDtype(),
"curtemp": pandas.StringDtype(),
"mintemp": pandas.StringDtype(),
"maxtemp": pandas.StringDtype(),
"oventemp": pandas.StringDtype(),
"allZeroSectors": pandas.StringDtype(),
"ctxRecoveryEvents": pandas.StringDtype(),
"ctxRecoveryErases": pandas.StringDtype(),
"NSversionMinor": pandas.StringDtype(),
"lifeMinTemp": pandas.StringDtype(),
"lifeMaxTemp": pandas.StringDtype(),
"powerCycles": pandas.StringDtype(),
"systemReads": pandas.StringDtype(),
"systemWrites": pandas.StringDtype(),
"readRetryOverflow": pandas.StringDtype(),
"unplannedPowerCycles": pandas.StringDtype(),
"unsafeShutdowns": pandas.StringDtype(),
"defragForcedReloCount": pandas.StringDtype(),
"bandReloForBDR": pandas.StringDtype(),
"bandReloForDieOffline": pandas.StringDtype(),
"bandReloForPFail": pandas.StringDtype(),
"bandReloForWL": pandas.StringDtype(),
"provisionalDefects": pandas.StringDtype(),
"uncorrectableProgErrors": pandas.StringDtype(),
"powerOnSeconds": pandas.StringDtype(),
"bandReloForChannelTimeout": pandas.StringDtype(),
"fwDowngradeCount": pandas.StringDtype(),
"dramCorrectablesTotal": pandas.StringDtype(),
"hb_id": pandas.StringDtype(),
"dramCorrectables1to1": pandas.StringDtype(),
"dramCorrectables4to1": pandas.StringDtype(),
"dramCorrectablesSram": pandas.StringDtype(),
"dramCorrectablesUnknown": pandas.StringDtype(),
"pliCapTestInterval": pandas.StringDtype(),
"pliCapTestCount": pandas.StringDtype(),
"pliCapTestResult": pandas.StringDtype(),
"pliCapTestTimeStamp": pandas.StringDtype(),
"channelHangSuccess": pandas.StringDtype(),
"channelHangFail": pandas.StringDtype(),
"BitErrorsHost41": pandas.StringDtype(),
"BitErrorsHost42": pandas.StringDtype(),
"BitErrorsHost43": pandas.StringDtype(),
"BitErrorsHost44": pandas.StringDtype(),
"BitErrorsHost45": pandas.StringDtype(),
"BitErrorsHost46": pandas.StringDtype(),
"BitErrorsHost47": pandas.StringDtype(),
"BitErrorsHost48": pandas.StringDtype(),
"BitErrorsHost49": pandas.StringDtype(),
"BitErrorsHost50": pandas.StringDtype(),
"BitErrorsHost51": pandas.StringDtype(),
"BitErrorsHost52": pandas.StringDtype(),
"BitErrorsHost53": pandas.StringDtype(),
"BitErrorsHost54": pandas.StringDtype(),
"BitErrorsHost55": pandas.StringDtype(),
"BitErrorsHost56": pandas.StringDtype(),
"mrrNearMiss": pandas.StringDtype(),
"mrrRereadAvg": pandas.StringDtype(),
"readDisturbEvictions": pandas.StringDtype(),
"L1L2ParityError": pandas.StringDtype(),
"pageDefects": pandas.StringDtype(),
"pageProvisionalTotal": pandas.StringDtype(),
"ASICTemp": pandas.StringDtype(),
"PMICTemp": pandas.StringDtype(),
"size": pandas.StringDtype(),
"lastWrite": pandas.StringDtype(),
"timesWritten": pandas.StringDtype(),
"maxNumContextBands": | pandas.StringDtype() | pandas.StringDtype |
import numpy as np
import pandas as pd
import tarfile
import sys
import os
import scipy.spatial
from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
import collections
import json
import warnings
import pickle
import multiprocessing
import parasail
import pwseqdist
from zipdist.zip2 import Zipdist2
from . import repertoire_db
from . import pgen
from . import mappers
from . import pairwise
# includes tools for use with explore.py
#from paths import path_to_matrices
#This replaces: from tcrdist.cdr3s_human import pb_cdrs
pb_cdrs = repertoire_db.generate_pbr_cdr()
class TCRrep:
"""
Class for managing a T-Cell Receptor Repertoire (TCRrep) analysis. Produce
a distance measure based on comparisons from multiple T-Cell receptor
complementarity-determining regions (CDRs)
Attributes
----------
cell_df : pandas.core.frame.DataFrame
input data at the level of individual cell level
clone_df : pandas.core.frame.DataFrame
deduplicated data frame at the level of unique clones
index_cols : list
list of strings, indicating columns to group cells to clones
organism : string
either "human" or "mouse"
meta_cols : list
list of strings, indicating metadata columns (e.g. hla_type)
chains : list
list of strings containing one or more of 'alpha', 'beta', 'gamma' or 'delta'
stored_tcrdist : list
list containing all previously generated outputs of
`TCRrep.compute_paired_tcrdist`
paired_tcrdist : ndarray
most recent output of :py:meth:`tcrdist.repertoire.TCRrep.compute_paired_tcrdist`
paired_tcrdist_weights : dictionary
CDR weights used to generate the most recent output of
TCRrep.compute_paired_tcrdist`
all_genes : dictionary
dictionary of reference TCRs
Methods
-------
TCRrep.infer_cdrs_from_v_gene()
infer CDR amino acid sequences from v-gene specified
deduplicate()
remove duplicate clones by grouping
compute_pairwise_all()
compute pairwise distances on deduplicated data for all regions in
a chain. Alternatively can compute distance between a
compute_paired_tcrdist()
calculate weighted pairwise distance across all CDRs
generate_ref_genes_from_db()
generates all_genes attribute a dictionary of reference TCRs
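    Example
    -------
    A minimal sketch of a typical workflow (assumes `example_df` already holds
    a `count` column plus the per-cell gene and CDR3 columns referenced by
    `index_cols`):
    >>> tr = TCRrep(cell_df = example_df, organism = "human", chains = ["alpha", "beta"])
    >>> tr.infer_cdrs_from_v_gene(chain = "alpha")
    >>> tr.infer_cdrs_from_v_gene(chain = "beta")
    >>> tr.index_cols = ['cdr3_a_aa', 'v_a_gene', 'cdr3_b_aa', 'v_b_gene']
    >>> tr.deduplicate()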
"""
def __init__(self,
cell_df,
chains=['alpha', 'beta'],
organism = "human",
db_file = "alphabeta_db.tsv"):
self.db_file = db_file
self.cell_df = cell_df
self.chains = chains
self.organism = organism
self.pwdist_df = None
self.clone_df = None
self.index_cols = []
self.stored_tcrdist = []
self.paired_tcrdist = None
self.paired_tcrdist_weights = None
self.meta_cols = None
self.project_id = "<Your TCR Repertoire Project>"
self.all_genes = None
self.imgt_aligned_status = None
# VALIDATION OF INPUTS
# check that chains are valid.
self._validate_organism()
self._validate_chains()
# check that is a pd.DataFrame
self._validate_cell_df()
# INIT OF SPECIFIC ATTRIBUTES BASED ON SELECTED CHAINS
self._initialize_chain_specific_attributes()
# INIT the REFERENCE DB see repertoire_db.py
self.generate_ref_genes_from_db(db_file)
def __repr__(self):
return 'tcrdist.repertoire.TCRrep for {}\n with index_cols: {}\n with model organism: {}'.format(self.project_id, self.index_cols, self.organism)
def __getitem__(self, position):
        # It should be decided whether __getitem__ should refer to the cell_df or to the clone_df, or whether it could be used for iterating over pairwise distance matrices
if self.clone_df is None:
return self.cell_df.loc[position]
if self.clone_df is not None:
return self.clone_df.loc[position]
def __len__(self):
return self.cell_df.shape[0]
def generate_ref_genes_from_db(self, db_file = "alphabeta_db.tsv"):
"""
Responsible for generating the all_genes attribute containing all
the reference TCR data.
Parameters
----------
db_file : string
        Loads an ordered dictionary of reference sequences into `self.all_genes` (nothing is returned)
"""
self.all_genes = repertoire_db.RefGeneSet(db_file).all_genes
def _map_gene_to_reference_seq2(self,
organism,
gene,
cdr,
attr ='cdrs_no_gaps'):
"""
internal function that looks up the cdr sequence (gapped or ungapped)
from the self.all_genes library
Parameter
---------
organism : string
mouse or human
gene : string
specifies the TCR gene such as 'TRAV1*01'
cdr : int
0 - CDR1, 1-CDR2 and 2 - CDR2.5
attr : string
'cdrs_no_gaps' or 'cdrs_aligned' with gaps from IMGT
"""
try:
aa_string = self.all_genes[organism][gene].__dict__[attr][cdr]
except KeyError:
aa_string = None
warnings.warn("{} gene was not recognized in reference db no cdr seq could be inferred".format(gene))
return(aa_string)
def deduplicate(self):
"""
With attribute self.index_col calls _deduplicate() and assigns
result to attribute self.clone_df
"""
self.clone_df = _deduplicate(self.cell_df, self.index_cols)
# check if any clones were lost due to missing information
if np.sum(self.cell_df['count']) != np.sum(self.clone_df['count']):
n_cells_lost = np.sum(self.cell_df['count']) - np.sum(self.clone_df['count'])
n_cell = np.sum(self.cell_df['count'])
warnings.warn(f"Not all cells/sequences could be grouped into clones. {n_cells_lost} of {n_cell} were not captured. This occurs when any of the values in the index columns are null or missing for a given sequence. To see entries with missing values use: tcrdist.repertoire.TCRrep.show_incomplete()\n")
        # if no clone_id column was provided, then create one as a sequence of numbers
if "clone_id" not in self.clone_df:
N = self.clone_df.shape[0]
self.clone_df['clone_id'] = range(1, N + 1 ,1)
return self
def show_incomplete(self):
ind = self.cell_df[self.index_cols].isnull().any(axis = 1)
incomplete_clones = self.cell_df.loc[ind,self.index_cols].copy()
return incomplete_clones
# def tcr_motif_clones_df(self):
# """
# Use this function to create a clones_df input appropriate to TCRMotif.
#
# It make use of a mapper to ensure proper columns and column names
#
# Example
# -------
# TCRMotif(clones_df = TCRRep.tcr_motif_clones_df())
# """
# return _map_clone_df_to_TCRMotif_clone_df(self.clone_df)
def tcr_motif_clones_df(self):
"""
Use this function to create a clones_df input appropriate to TCRMotif.
It make use of a mapper to ensure proper columns and column names
Example
-------
TCRMotif(clones_df = TCRrep.tcr_motif_clones_df())
"""
return mappers.generic_pandas_mapper(self.clone_df,
mappers.TCRrep_clone_df_to_TCRMotif_clone_df)
def infer_cdrs_from_v_gene(self, chain, imgt_aligned = False):
"""
        Function taking TCR v-gene name to infer the amino acid
sequence of cdr1, cdr2, and pmhc loop regions.
Parameters
----------
chain : string
'alpha', 'beta', 'gamma', or 'delta'
imgt_aligned : boolean
if True cdr1, cdr2, cdr2.5 will be returned with gaps
and by definition will be the same length. MSH.......ET
Returns
-------
self.cell_df : pandas.core.frame.DataFrame
Assigns [cdr3|cdr2|cdr1|pmhc]_[a|b|d|g]_aa columns in self.cell_df
Examples
--------
>>> testrep = TCRrep(cell_df = example_df, organism = "human", chains= ["alpha","beta"])
>>> testrep.infer_cdrs_from_v_gene(chain = "alpha")
>>> testrep.infer_cdrs_from_v_gene(chain = "beta")
>>> testrep.index_cols = testrep.index_cols + ['cdr1_a_aa','cdr2_a_aa', 'pmhc_a_aa', 'cdr1_b_aa', 'cdr2_b_aa', 'pmhc_b_aa']
Notes
-----
        This function takes the V-gene names and infers the amino acid
sequence of the cdr1, cdr2, and pmhc region (pmhc refers to the
pMHC-facing loop between CDR2 and CDR3 (IMGT alignment columns 81 - 86.
These sequences are based up on lookup from the dictionary here:
originally: from tcrdist.cdr3s_human import pb_cdrs
now:
self.generate_ref_genes_from_db(db_file)
imgt_aligned : boolean
if True cdr1, cdr2, cdr2.5 will be returned with gaps
and by definition will be the same length.
MSH.......ET
FNH.......DT
LGH.......NA
References
----------
IMGT definitions of cdr1, cdr2, and pMHC-facing can be found here
http://www.imgt.org/IMGTScientificChart/Nomenclature/IMGT-FRCDRdefinition.html
"""
if not imgt_aligned:
self.imgt_aligned_status = False
f0 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 0,
organism = self.organism,
attr ='cdrs_no_gaps')
f1 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 1,
organism = self.organism,
attr ='cdrs_no_gaps')
f2 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 2,
organism = self.organism,
attr ='cdrs_no_gaps')
else:
self.imgt_aligned_status = True
f0 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 0,
organism = self.organism,
attr ='cdrs')
f1 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 1,
organism = self.organism,
attr ='cdrs')
f2 = lambda v : self._map_gene_to_reference_seq2(gene = v,
cdr = 2,
organism = self.organism,
attr ='cdrs')
if chain is "alpha":
self.cell_df['cdr1_a_aa'] = list(map(f0, self.cell_df.v_a_gene))
self.cell_df['cdr2_a_aa'] = list(map(f1, self.cell_df.v_a_gene))
self.cell_df['pmhc_a_aa'] = list(map(f2, self.cell_df.v_a_gene))
if chain is "beta":
self.cell_df['cdr1_b_aa'] = list(map(f0, self.cell_df.v_b_gene))
self.cell_df['cdr2_b_aa'] = list(map(f1, self.cell_df.v_b_gene))
self.cell_df['pmhc_b_aa'] = list(map(f2, self.cell_df.v_b_gene))
if chain is "gamma":
self.cell_df['cdr1_g_aa'] = list(map(f0, self.cell_df.v_g_gene))
self.cell_df['cdr2_g_aa'] = list(map(f1, self.cell_df.v_g_gene))
self.cell_df['pmhc_g_aa'] = list(map(f2, self.cell_df.v_g_gene))
if chain is "delta":
self.cell_df['cdr1_d_aa'] = list(map(f0, self.cell_df.v_d_gene))
self.cell_df['cdr2_d_aa'] = list(map(f1, self.cell_df.v_d_gene))
self.cell_df['pmhc_d_aa'] = list(map(f2, self.cell_df.v_d_gene))
def infer_olga_aa_cdr3_pgens(self,
chain,
cdr3_only = False,
chain_folder = None,
recomb_type = None):
"""
Infer the probability of generation using the Olga Code base
(Sethna et al. 2018) updated to python 3 for use with tcrdist.
Parameters
----------
chain : string
'alpha', 'beta' (TODO: create default models for 'gamma' and 'delta')
cdr3_only : boolean
(optional) if True, the amino acid cdr3 probability of generation statistic
will be calculated without using the V or J gene usage statistics
chain_folder : string
(optional) specifies the OLGA default model folder containing a
generative model. When None (which is recommended), the default
folder is chosen based on the chain argument.
recomb_type : string
(optional) 'VDJ' or 'VJ' specifying the OLGA recombination model.
When None (which is recommended), the default folder is chosen based
on the chain argument.
Returns
-------
olga_pgens : pd.Series
containing the probability of generation, this output is also assigned
to clone_df.cdr3_[a|b|g|d]_aa_pgen
Notes
-----
tcrdist2 authors UPDATED THE FOLLOWING CODE TO PYTHON 3
USING COMMIT e825c333f0f9a4eb02132e0bcf86f0dca9123114 (Jan 18, 2019)
ORIGINAL OLGA CODE CAN BE FOUND AT:
https://github.com/zsethna/OLGA
"""
assert(isinstance(self.clone_df, pd.DataFrame)), "this function requires a valid TCRrep.clone_df has been instantiated"
# The Nested If Statements assigns cdr3s, v_genes, j_genes based on chain, organism and other optional args
if chain == "alpha":
if (chain_folder is None):
                if self.organism == 'human':
                    chain_folder = "human_T_alpha"
                elif self.organism == 'mouse':
                    raise ValueError("SORRY: OLGA default files do not yet support mouse alpha TCRs")
                    # chain_folder = "mouse_T_alpha"  # unreachable until a mouse alpha model is available
if (recomb_type is None):
recomb_type = "VJ"
cdr3s = self.clone_df.cdr3_a_aa
if not cdr3_only:
v_genes = self.clone_df.v_a_gene
j_genes = self.clone_df.j_a_gene
else:
v_genes = None
j_genes = None
if chain == "beta":
if (chain_folder is None):
                if self.organism == 'human':
                    chain_folder = "human_T_beta"
                elif self.organism == 'mouse':
                    chain_folder = "mouse_T_beta"
if (recomb_type is None):
recomb_type = "VDJ"
cdr3s = self.clone_df.cdr3_b_aa
if not cdr3_only:
v_genes = self.clone_df.v_b_gene
j_genes = self.clone_df.j_b_gene
else:
v_genes = None
j_genes = None
if chain == "gamma":
raise ValueError("SORRY: OLGA default files do not yet support gamma TCRs")
if (chain_folder is None):
                if self.organism == 'human':
                    chain_folder = "human_T_gamma"
                elif self.organism == 'mouse':
                    chain_folder = "mouse_T_gamma"
            if (recomb_type is None):
                recomb_type = None  # TODO: unclear which recombination model is the most appropriate for gamma
cdr3s = self.clone_df.cdr3_g_aa
if not cdr3_only:
v_genes = self.clone_df.v_g_gene
j_genes = self.clone_df.j_g_gene
else:
v_genes = None
j_genes = None
if chain == "delta":
raise ValueError("SORRY:OLGA default files do not yet support delta TCRs")
            if (chain_folder is None):
                if self.organism == 'human':
                    chain_folder = "human_T_delta"
                elif self.organism == 'mouse':
                    chain_folder = "mouse_T_delta"
            if (recomb_type is None):
                recomb_type = None  # TODO: unclear which recombination model is the most appropriate for delta
cdr3s = self.clone_df.cdr3_d_aa
if not cdr3_only:
v_genes = self.clone_df.v_d_gene
j_genes = self.clone_df.j_d_gene
else:
v_genes = None
j_genes = None
# initializes the appropriate olga genomic model
my_olga_model = pgen.OlgaModel(chain_folder = chain_folder,
recomb_type = recomb_type)
# computes pgen from clone_df
olga_pgens = my_olga_model.compute_aa_cdr3_pgens(cdr3s,
v_genes,
j_genes)
if chain is "alpha":
self.clone_df['cdr3_a_aa_pgen'] = pd.Series(olga_pgens)
if chain is "beta":
self.clone_df['cdr3_b_aa_pgen'] = pd.Series(olga_pgens)
if chain is "gamma":
self.clone_df['cdr3_g_aa_pgen'] = pd.Series(olga_pgens)
if chain is "delta":
self.clone_df['cdr3_d_aa_pgen'] = pd.Series(olga_pgens)
return(pd.Series(olga_pgens))
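    # Usage sketch for the pgen computation above (assumes a populated TCRrep instance `tr`
    # whose clone_df already holds cdr3_b_aa, v_b_gene and j_b_gene columns; names are illustrative):
    #     pgens = tr.infer_olga_aa_cdr3_pgens(chain="beta")
    #     tr.clone_df["cdr3_b_aa_pgen"].describe()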
def archive(self,
dest = "default_archive",
dest_tar_name = "default_archive.tar.gz",
verbose = True,
use_csv = True):
"""
Use Zipdist2 to Make an Archive.tar.gz
Parameters
----------
dest : str
e.g., 'default_archive'
dest_tar_name : str
e.g., 'default_archive.tar.gz'
verbose : bool
if True, report steps in archive process
use_csv : bool
if True, archive will include .csv file. Useful for porting files to other applications, but creates large files.
Example
-------
.. code-block:: python
tr = TCRrep(cell_df = pd.DataFrame(), organism = "mouse")
tr.archive(dest = "default_archive", dest_tar_name = "default_archive.tar.gz")
Notes
-----
        See :py:meth:`tcrdist.repertoire.rebuild` for rebuilding a TCRrep instance from
        a TCRrep archive .tar.gz file.
"""
self.cell_df_index = self.cell_df.index.copy()
self.cell_df = self.cell_df.reset_index()
z = Zipdist2(name = dest_tar_name , target = self)
z._save(dest = dest, dest_tar = dest_tar_name, verbose = verbose, use_csv = use_csv )
sys.stdout.write(f"\tArchiving your TCRrep using Zipdist2 in [{dest_tar_name}]\n")
def rebuild(self, dest_tar_name = "default_archive.tar.gz", verbose = True ):
"""
        Use Zipdist2 to rebuild a TCRrep instance from an Archive.tar.gz
Parameters
----------
dest_tar_name : str
e.g., 'default_archive.tar.gz'
verbose : bool
If True, report rebuilding process steps.
Example
-------
Shows :py:meth:`tcrdist.repertoire.archive` and :py:meth:`tcrdist.repertoire.rebuild`
used together.
.. code-block:: python
tr = TCRrep(cell_df = pd.DataFrame(), organism = "mouse")
tr.archive(dest = "default_archive", dest_tar_name = "default_archive.tar.gz")
tr_new = TCRrep(cell_df = pd.DataFrame(), organism = "mouse")
tr_new.rebuild(dest_tar_name = "default_archive.tar.gz")
Notes
-----
See :py:meth:`tcrdist.repertoire.archive` for creating TCRrep archive file.
"""
#tr = TCRrep(cell_df=df.iloc[0:0,:], chains=chains, organism='mouse')
z = Zipdist2(name = "default_archive", target = self)
z._build(dest_tar = dest_tar_name , target = self, verbose = verbose)
# VALIDATION OF INPUTS
# check that chains are valid.
self._validate_organism()
self._validate_chains()
# check that is a pd.DataFrame
self._validate_cell_df()
# RE INIT the REFERENCE DB see repertoire_db.py
self.generate_ref_genes_from_db(self.db_file)
def tcrdist2(self,
metric = "nw",
processes = None,
weights = None,
dump = False,
reduce = True,
save = False,
dest = "default_archive",
dest_tar_name = "default_archive.tar.gz",
verbose = True):
"""
Automated calculation of single chain and paired chain tcr-distances
Parameters
----------
metric : str
specified metric, currently only "nw" and "hamming" are supported
(see notes for legacy methods)
processes : int
            number of cpus to use; by default half of the available cpus/threads are used
weights : dict
override cdr weightings
dump : bool
if True, dump intermediate cdr1, cdr2, and pmhc pairwise matrices
reduce : bool
if True, converts distance matrices to a smaller data type.
save : bool
if True, saves intermediate files to dest
dest : str
path to save components
verbose : bool
If True, provide sys.stdout reports.
Notes
-----
        tcrdist2 is a method to help new users run tcrdist2 with sensible defaults.
Distance metrics are highly customizable.
Consult the `docs <https://tcrdist2.readthedocs.io>`_ for more information.
To compute Dash et al. 2017 style tcrdistance, instead of tcrdist2,
use commands:
TCRrep._tcrdist_legacy_method_alpha_beta()
TCRrep._tcrdist_legacy_method_beta()
TCRrep._tcrdist_legacy_method_alpha()
TCRrep._tcrdist_legacy_method_gamma_delta()
TCRrep._tcrdist_legacy_method_gamma()
TCRrep._tcrdist_legacy_method_delta()
"""
        # Default to using half of the available processes
        if processes is None:
            max_threads = multiprocessing.cpu_count()
            processes = max_threads // 2
            sys.stdout.write(f"tcrdist2 detected {max_threads} available cpus/threads.\n")
            sys.stdout.write(f"\tTCRrep uses parallel processing, setting default to use {processes} cpus/threads.\n")
            sys.stdout.write(f"\tThe `processes` arg of TCRrep.tcrdist2() can be set manually\n")
for chain in self.chains:
self.infer_cdrs_from_v_gene(chain=chain, imgt_aligned=True)
if weights is None:
weights = {'cdr1_a_aa':1,
'cdr2_a_aa':1,
'cdr3_a_aa':3,
'pmhc_a_aa':1,
'cdr1_b_aa':1,
'cdr2_b_aa':1,
'cdr3_b_aa':3,
'pmhc_b_aa':1,
'cdr1_g_aa':1,
'cdr2_g_aa':1,
'cdr3_g_aa':3,
'pmhc_g_aa':1,
'cdr1_d_aa':1,
'cdr2_d_aa':1,
'cdr3_d_aa':3,
'pmhc_d_aa':1,
'v_a_gene':0,
'j_a_gene':0,
'v_b_gene':0,
'j_b_gene':0,
'v_g_gene':0,
'j_g_gene':0,
'v_d_gene':0,
'j_d_gene':0,
'cdr3_a_nucseq':0,
'cdr3_b_nucseq':0,
'cdr3_g_nucseq':0,
'cdr3_d_nucseq':0}
index_cdrs = [k for k in weights.keys() if k in self.cell_df.columns]
for x in ['clone_id', 'subject', 'epitope']:
            assert x in self.cell_df.columns, f"{x} must be in TCRrep.cell_df"
self.index_cols = ['clone_id', 'subject', 'epitope'] + index_cdrs
sys.stdout.write("Deduplicating your TCRrep.cell_df to make TCRrep.clone_df.\n")
self.deduplicate()
sys.stdout.write(f"Computing pairwise matrices for multiple Complementarity Determining Regions (CDRs):.\n")
for chain in self.chains:
if verbose: sys.stdout.write(f"\tComputing pairwise matrices for cdrs within the {chain}-chain using the {metric} metric.\n")
self.compute_pairwise_all(chain = chain, metric = metric, processes = processes)
sys.stdout.write("Calculating composite tcrdistance measures:\n")
self.compute_paired_tcrdist( chains=self.chains, store_result=False)
for chain in self.chains:
if verbose: sys.stdout.write(f"\tSingle chain pairwise tcrdistances are in attribute : TCRrep.pw_{chain}\n")
if verbose: sys.stdout.write(f"\tCombined pairwise tcrdistances are in attribute : TCRrep.pw_tcrdist\n")
if verbose: sys.stdout.write(f"\tCDR specific tcrdistances are in attributes, e.g., : TCRrep.cdr3_{chain[0]}_aa_pw\n")
        # <dump> boolean controls whether we dump the easy-to-recalculate cdr1, cdr2, and pmhc
        # pairwise matrices, which most users will never need again.
        # <reduce> boolean controls whether we convert distance matrices
        # to a smaller data type.
if reduce:
data_type = 'int16'
if verbose: sys.stdout.write(f"Reducing File Size: `reduce` argumment set to {reduce}:\n")
self.reduce_file_size( data_type = data_type, verbose = True)
# pairwise matices, which most users will never again.
if dump:
if verbose: sys.stdout.write(f"Cleanup: `dump` argument set to {dump}. Dumping individual CDR specific distance matrices:\n")
for i in index_cdrs:
if i.startswith("cdr1") or i.startswith("cdr2") or i.startswith("pmhc"):
if i.endswith("aa"):
i = f"{i}_pw"
sys.stdout.write(f"\tDumping : {i}\n")
self.__dict__[i] = None
if save:
if verbose: sys.stdout.write(f"Archiving your TCRrep using Zipdist2 (save = {save})\n")
# To avoid = ValueError: feather does not support serializing a non-default index for the index; you can .reset_index() to make the index into column(s)
self.archive(dest = dest, dest_tar_name = dest_tar_name, verbose = True)
if verbose: sys.stdout.write(f"\tArchiving your TCRrep using Zipdist2 in [{dest_tar_name}]\n")
if verbose: sys.stdout.write(f"TCRrep.tcrdist2() COMPLETED SUCCESSFULLY, see the docs for Analysis steps!\n")
def compute_pairwise_all(self,
chain,
compute_specific_region = None,
metric = "hamming",
processes = 2,
user_function = None,
to_matrix = True,
**kwargs):
"""
Computes pairwise distances for all regions on a given
chain or for a specific region on that chain.
Parameters
----------
chain : string
'alpha', 'beta', 'gamma', or 'delta'
compute_specific_region : string
optional string (e.g. "cdr2_a_aa") to over-ride function behavior
and compute only a single region
metric : string
'nw', 'hamming', or 'custom' (or if legacy tcrdist is to be calculated,
"tcrdist_cdr3", "tcrdist_cdr1", "tcrdist_cdr2",
"tcrdist_cdr2.5", "tcrdist_pmhc" can be supplied. WARNING:
imgt_aligned must be set to True in tr.infer_cdrs_from_v_gene().
processes : int
int for number of available cpu for multiprocessing (to see available
try multiprocessing.cpu_count())
user_function : function
function for a custom distance metric on two strings (This is
an advanced option, so don't use this unless you are absolutely
sure what you are doing; metric arg must be set to 'custom').
to_matrix : boolean
True will return pairwise distance as result as a 2D ndarray
Notes
-----
Uses _assign_pw_result to assign self.[cdr3|cdr2|cdr1|pmhc]_[a|b|d|g]_aa_pw objects
Examples
--------
>>> testrep = TCRrep(cell_df = example_df, organism = "human", chains= ["alpha","beta"])
>>> testrep.infer_cdrs_from_v_gene(chain = "alpha")
>>> testrep.infer_cdrs_from_v_gene(chain = "beta")
>>> testrep.index_cols = testrep.index_cols + ['cdr1_a_aa','cdr2_a_aa','pmhc_a_aa', 'cdr1_b_aa', 'cdr2_b_aa', 'pmhc_b_aa']
>>> testrep.deduplicate()
>>> testrep.compute_pairwise_all(chain = "alpha", metric= "hamming")
>>> testrep.compute_pairwise_all(chain = "beta", metric= "hamming")
alternatively, compute each region one by one
>>> testrep.compute_pairwise_all(chain = "beta", compute_specific_region="cdr1_b_aa")
>>> testrep.compute_pairwise_all(chain = "alpha", compute_specific_region="cdr2_a_aa")
"""
# validate chain argument passed
self._validate_chain(chain)
if metric in ["tcrdist_cdr3", "tcrdist_cdr1", "tcrdist_cdr2",
"tcrdist_cdr2.5", "tcrdist_pmhc"]:
if not self.imgt_aligned_status:
raise ValueError("imgt_aligned must be set to True in tr.infer_cdrs_from_v_gene()")
# If compute_specific_region is None, then the behavior is to loop through the a list regions.
if compute_specific_region is None:
index_col_from_chain = {'alpha' : ['cdr3_a_aa', 'cdr2_a_aa',
'cdr1_a_aa', 'pmhc_a_aa'],
'beta' : ['cdr3_b_aa', 'cdr2_b_aa',
'cdr1_b_aa', 'pmhc_b_aa'],
'gamma' : ['cdr3_g_aa', 'cdr2_g_aa',
'cdr1_g_aa', 'pmhc_g_aa'],
'delta' : ['cdr3_d_aa', 'cdr2_d_aa',
'cdr1_d_aa', 'pmhc_d_aa']}
# Alternative behavior: is to loop over a single chain and region.
else:
index_col_from_chain = {}
index_col_from_chain[chain] = [compute_specific_region]
for index_col in index_col_from_chain[chain]:
try:
sequences = self.clone_df[index_col]
except KeyError:
warnings.warn("{} not found, no distances computed for {}".format(index_col, index_col))
continue
# COMPUTE PAIRWISE
# If kwargs were passed use them, otherwise pass chain-sp. smat from above
if ('matrix' in kwargs) or ("open" in kwargs):
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**kwargs)
else:
# Pull the default substitution matrix from object attributes
smat = self._get_smat(chain = chain, index_col = index_col)
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**{'matrix' : smat})
# ASSIGN RESULT
self._assign_pw_result(pw = pw, chain=chain, index_col=index_col)
def compute_paired_tcrdist(self,
chains = ['alpha', 'beta'],
replacement_weights = {},
store_result = False):
"""
Computes tcrdistance metric combining distances metrics across multiple
T Cell Receptor CDR regions.
Parameters
----------
chains : list
list of strings containing some combination of 'alpha', 'beta',
'gamma', and 'delta'
replacement_weights : dictionary
optional dictionary of the form {'cdr1_a_aa_pw':1, 'cdr2_a_aa_pw':1}
used to place greater weight on certain TCR regions. The default
is a weight of 1.
store_result : boolean
True will store results to
:py:attr:`TCRrep.stored_tcrdist`
Returns
-------
r : dictionary
a dictionary with keys paired_tcrdist points to a 2D
tcrdist np.ndarray and paired_tcrdist_weights pointing to
dictionary of weights. See notes.
Notes
-----
Calling this function assigns results to
`TCRrep.paired_tcrdist` and
`TCRrep.paired_tcrdist_weights`
and stores r to
`TCRrep.stored_tcrdist`
In addition it returns a dictionary with keys `paired_tcrdist` 2D
tcrdist np.array and `paired_tcrdist_weights`
a dictionary of regions and relative weights:
{'paired_tcrdist': array([[ 0., 76., 80.,..., 89., 89., 87.],
[ 76., 0., 60., ..., 81., 75., 43.],
[ 80., 60., 0., ..., 59., 81., 77.],
...,
[ 89., 81., 59., ..., 0., 60., 58.],
[ 89., 75., 81., ..., 60., 0., 40.],
[ 87., 43., 77., ..., 58., 40., 0.]]),
'paired_tcrdist_weights': {'cdr1_a_aa_pw': 1,
'cdr1_b_aa_pw': 2,
'cdr2_a_aa_pw': 1,
'cdr2_b_aa_pw': 2,
'cdr3_a_aa_pw': 2,
'cdr3_b_aa_pw': 4,
'pmhc_a_aa_pw': 1,
'pmhc_b_aa_pw': 2}}
"""
[self._validate_chain(c) for c in chains]
weights = {'cdr1_a_aa_pw':1,
'cdr2_a_aa_pw':1,
'cdr3_a_aa_pw':1,
'pmhc_a_aa_pw':1,
'cdr1_b_aa_pw':1,
'cdr2_b_aa_pw':1,
'cdr3_b_aa_pw':1,
'pmhc_b_aa_pw':1,
'cdr1_g_aa_pw':1,
'cdr2_g_aa_pw':1,
'cdr3_g_aa_pw':1,
'pmhc_g_aa_pw':1,
'cdr1_d_aa_pw':1,
'cdr2_d_aa_pw':1,
'cdr3_d_aa_pw':1,
'pmhc_d_aa_pw':1}
for k in replacement_weights:
weights[k] = replacement_weights[k]
alpha_keys = [k for k in list(weights.keys()) if k.endswith("a_aa_pw")]
beta_keys = [k for k in list(weights.keys()) if k.endswith("b_aa_pw")]
gamma_keys = [k for k in list(weights.keys()) if k.endswith("g_aa_pw")]
delta_keys = [k for k in list(weights.keys()) if k.endswith("d_aa_pw")]
# for single chain computation, results in TCRrep.pw_alpha, TCRrep.pw_beta, TCRrep.pw_gamma, and or TCRrep.pw_delta,
if 'alpha' in chains:
tcrdist = np.zeros(self.cdr3_a_aa_pw.shape)
for k in alpha_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_alpha = tcrdist
if 'beta' in chains:
tcrdist = np.zeros(self.cdr3_b_aa_pw.shape)
for k in beta_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_beta = tcrdist
if 'gamma' in chains:
tcrdist = np.zeros(self.cdr3_g_aa_pw.shape)
for k in gamma_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_gamma = tcrdist
if 'delta' in chains:
tcrdist = np.zeros(self.cdr3_d_aa_pw.shape)
for k in delta_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
self.pw_delta = tcrdist
        # For combined chain tcrdist, results in TCRrep.paired_tcrdist and TCRrep.pw_tcrdist
full_keys = []
if 'alpha' in chains:
full_keys = full_keys + alpha_keys
if 'beta' in chains:
full_keys = full_keys + beta_keys
if 'gamma' in chains:
full_keys = full_keys + gamma_keys
if 'delta' in chains:
full_keys = full_keys + delta_keys
# initialize tcrdist matrix size
for k in full_keys:
try:
tcrdist = np.zeros(self.__dict__[k].shape)
break
except KeyError:
pass
for k in full_keys:
try:
tcrdist = self.__dict__[k]*weights[k] + tcrdist
except KeyError:
warnings.warn("tcrdist was calculated without: '{}' because pairwise distances haven't been computed for this region:".format(k))
pass
# keep 'paired_tcrdist' to avoid breaking tests
self.paired_tcrdist = tcrdist
self.pw_tcrdist = tcrdist
self.paired_tcrdist_weights = {k:weights[k] for k in full_keys}
        # Typically we don't want to store different tcrdistances in the same repertoire,
        # but setting store_result=True appends the result dictionary to TCRrep.stored_tcrdist.
r = {'paired_tcrdist' : tcrdist,
'paired_tcrdist_weights' : {k:weights[k] for k in full_keys}}
if store_result:
self.stored_tcrdist.append(r)
return(r)
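    # Example of re-weighting regions (sketch, assumes pairwise matrices were already computed):
    # double the CDR3 contribution while leaving all other regions at the default weight of 1.
    #     tr.compute_paired_tcrdist(chains=["alpha", "beta"],
    #                               replacement_weights={"cdr3_a_aa_pw": 2, "cdr3_b_aa_pw": 2})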
def compute_pairwise(self,
chain,
metric = "nw",
processes = 2,
user_function = None,
to_matrix = True,
**kwargs):
"""
Early Function to be replaced with compute_pairwise_all.
TODO: Rewrite test and remove.
"""
# validate chain argument passed
self._validate_chain(chain)
# another option would be to loop through the a list of chains
index_col_from_chain = {'alpha' : 'cdr3_a_aa',
'beta' : 'cdr3_b_aa',
                                'gamma' : 'cdr3_g_aa',
'delta' : 'cdr3_d_aa'}
sequences = self.clone_df[index_col_from_chain[chain]]
# Pull the default substitution matrix
if chain == "alpha":
smat = self.cdr3_a_aa_smat
elif chain == "beta":
smat = self.cdr3_b_aa_smat
elif chain == 'gamma':
smat = self.cdr3_g_aa_smat
elif chain == "delta":
smat = self.cdr3_d_aa_smat
# If kwargs were passed use them, otherwise pass chain-sp. smat from above
if ('matrix' in kwargs) or ("open" in kwargs):
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**kwargs)
else:
pw = _compute_pairwise(sequences = sequences,
metric = metric,
processes = processes,
user_function = user_function,
**{'matrix' : smat})
if chain == "alpha":
self.cdr3_a_aa_pw = pw
elif chain == "beta":
self.cdr3_b_aa_pw = pw
elif chain == 'gamma':
self.cdr3_g_aa_pw = pw
elif chain == "delta":
self.cdr3_d_aa_pw = pw
def generate_cluster_index(self, t = 75, criterion = "distance", method = "complete", append_counts = False):
"""
Add 'cluster_index' column to TCRrep.clone_df
Parameters
----------
t : int
scipy.cluster.hierarchy.fcluster param t
criterion : str
scipy.cluster.hierarchy.fcluster param criterion
method : str
            scipy.cluster.hierarchy.linkage param method
Notes
-----
https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html
"""
compressed_dmat = scipy.spatial.distance.squareform(self.paired_tcrdist, force = "vector")
        Z = linkage(compressed_dmat, method = method)
cluster_index = fcluster(Z, t = t, criterion = criterion)
assert len(cluster_index) == self.clone_df.shape[0]
assert len(cluster_index) == self.paired_tcrdist.shape[0]
self.clone_df['cluster_index'] = cluster_index
if append_counts:
self._append_cluster_count()
self._append_seq_counts_per_cluster()
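    # Usage sketch: flat clusters from the paired tcrdist matrix at a distance cutoff of 75.
    #     tr.compute_paired_tcrdist(chains=["alpha", "beta"])
    #     tr.generate_cluster_index(t=75, criterion="distance", append_counts=True)
    #     tr.clone_df["cluster_index"].value_counts()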
def _append_cluster_count(self):
"""
Appends the number of clones in a cluster to each row of TCRrep.clone_df
"""
cluster_count = self.clone_df.cluster_index.value_counts().\
reset_index().\
rename(columns = {'index':'cluster_index', "cluster_index": "cluster_count"}).\
copy()
self.clone_df = self.clone_df.merge(cluster_count, how= "left", left_on = "cluster_index", right_on = "cluster_index")
def _append_seq_counts_per_cluster(self):
"""
Appends the sum of seq counts per cluster to each row of TCRrep.clone_df
"""
seq_counts = self.clone_df.\
groupby(['cluster_index'])['count'].sum().\
reset_index().\
rename(columns = {'count':'seq_count'})
self.clone_df = self.clone_df.merge(seq_counts, how = "left", left_on = "cluster_index", right_on = "cluster_index")
def ispublic(self, gr, var = "subject", n = 1):
"""
        Return 'public' if a cluster is public, defined as comprising members from more than
        `n` unique values of `var` (e.g., multiple individuals or cell subsets such as CD4/CD8);
        otherwise return 'private'.
        Parameters
        ----------
        gr : group
            group within a pandas DataFrame.groupby
        var : str
            column name whose number of unique values determines whether a cluster is public
        n : int
            a cluster is considered public when the number of unique values of `var` exceeds n
        Returns
        -------
        r : str
            'public' or 'private'
"""
r = len(gr[var].value_counts()) > n
if r:
return 'public'
else:
return 'private'
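    # Usage sketch: label each cluster as 'public' or 'private' based on subject membership.
    #     labels = tr.clone_df.groupby("cluster_index").apply(tr.ispublic, var="subject", n=1)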
def get_func_stat(self, gr, var, func = np.median, **kwargs):
"""
        Get a summary statistic by applying a function to a group
        Parameters
        ----------
        gr : group
            group within a pandas DataFrame.groupby
        var : str
            column name to which `func` is applied
        func : function
            function that can operate on a series or list of values specified by var
Returns
-------
r : float or int
"""
r = func(gr[var], **kwargs)
return r
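    # Usage sketch: per-cluster median of the 'count' column.
    #     medians = tr.clone_df.groupby("cluster_index").apply(tr.get_func_stat,
    #                                                          var="count", func=np.median)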
def get_cluster_summary(self, df=None, groupvar = 'cluster_index'):
"""get_cluster_pgen_and_count_summary """
if df is None:
df = self.clone_df.copy()
cluster_summary = list()
assert groupvar in df.columns
assert "pgen" in df.columns
assert "count" in df.columns
for name, group in df.groupby([groupvar]):
public = self.ispublic(group, "subject")
cluster_summary.append({"cluster_index" : name,
"public" : public,
"min_pgen" : self.get_func_stat(gr = group, var = "pgen", func = np.min),
"median_pgen" : self.get_func_stat(gr = group, var = "pgen", func = np.median),
"max_pgen" : self.get_func_stat(gr = group, var = "pgen", func = np.max),
"cluster_count" : group.shape[0],
"seq_count" : self.get_func_stat(gr = group, var = "count", func = np.sum),
"seq_min" : self.get_func_stat(gr = group, var = "count", func = np.min),
"seq_median" : self.get_func_stat(gr = group, var = "count", func = np.median),
"seq_max" : self.get_func_stat(gr = group, var = "count", func = np.max)})
        cluster_summary = pd.DataFrame(cluster_summary)
import pickle
import numpy as np
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
def get_feature_array(mols):
"""
Return an pd.DataFrame of molecule properties given an array (or array-like) of molecule objects
Parameters
----------
mols: array-like, array of molecule objects
Returns
----------
mol_features: pd.DataFrame of molecule features
"""
entries = [get_predictors(mol) for mol in mols]
    mol_features = pd.DataFrame(data=entries, dtype=float)
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 Alibaba Group Holding Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import threading
import time
from typing import List
import numpy as np
import pandas as pd
from vineyard.core import default_builder_context
from vineyard.core import default_resolver_context
from vineyard.data import register_builtin_types
from vineyard.io.byte import ByteStream
from vineyard.io.dataframe import DataframeStream
from vineyard.io.recordbatch import RecordBatchStream
register_builtin_types(default_builder_context, default_resolver_context)
logger = logging.getLogger('vineyard')
def generate_random_dataframe(dtypes, size):
columns = dict()
for k, v in dtypes.items():
columns[k] = np.random.random(size).astype(v)
    return pd.DataFrame(columns)
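# Usage sketch (illustrative dtypes): build a 1000-row frame with one float and one int column.
#     df = generate_random_dataframe({'a': 'float64', 'b': 'int32'}, 1000)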
from os.path import join, exists, dirname, basename
from os import makedirs
import sys
import pandas as pd
from glob import glob
import seaborn as sns
import numpy as np
from scipy import stats
import xlsxwriter
import matplotlib.pyplot as plt
from scripts.parse_samplesheet import get_min_coverage, get_role, add_aliassamples, get_species
from scripts.snupy import check_snupy_status
import json
import datetime
import getpass
import socket
import requests
from requests.auth import HTTPBasicAuth
import urllib3
import yaml
import pickle
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
plt.switch_backend('Agg')
RESULT_NOT_PRESENT = -5
def report_undertermined_filesizes(fp_filesizes, fp_output, fp_error,
zscorethreshold=1):
# read all data
fps_sizes = glob(join(dirname(fp_filesizes), '*.txt'))
pds_sizes = []
for fp_size in fps_sizes:
data = pd.read_csv(
fp_size, sep="\t", names=["filesize", "filename", "status"],
index_col=1)
# mark given read as isme=True while all other data in the dir
# are isme=False
data['isme'] = fp_filesizes in fp_size
data['filesize'] /= 1024**3
pds_sizes.append(data)
pd_sizes = pd.concat(pds_sizes)
# compute z-score against non-bad known runs
pd_sizes['z-score'] = np.nan
idx_nonbad = pd_sizes[pd_sizes['status'] != 'bad'].index
pd_sizes.loc[idx_nonbad, 'z-score'] = stats.zscore(
pd_sizes.loc[idx_nonbad, 'filesize'])
# plot figure
fig = plt.figure()
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] != 'bad')]['filesize'],
kde=False, rug=False, color="black", label='known runs')
ax = sns.distplot(
pd_sizes[(pd_sizes['isme'] == np.False_) &
(pd_sizes['status'] == 'bad')]['filesize'],
kde=False, rug=False, color="red", label='bad runs')
ax = sns.distplot(
pd_sizes[pd_sizes['isme'] == np.True_]['filesize'],
kde=False, rug=True, color="green", label='this run')
_ = ax.set_ylabel('number of files')
_ = ax.set_xlabel('file-size in GB')
ax.set_title('run %s' % basename(fp_filesizes)[:-4])
ax.legend()
# raise error if current run contains surprisingly large undetermined
# filesize
if pd_sizes[(pd_sizes['isme'] == np.True_) &
(pd_sizes['status'] == 'unknown')]['z-score'].max() > zscorethreshold:
ax.set_title('ERROR: %s' % ax.get_title())
fig.savefig(fp_error, bbox_inches='tight')
raise ValueError(
("Compared to known historic runs, your run contains surprisingly "
"(z-score > %f) large file(s) of undetermined reads. You will find"
" an supporting image at '%s'. Please do the following things:\n"
"1. discuss with lab personal about the quality of the run.\n"
"2. should you decide to keep going with this run, mark file "
"status (3rd column) in file '%s' as 'good'.\n"
"3. for future automatic considerations, mark file status (3rd "
"column) as 'bad' if you have decided to abort processing due to"
" too low quality (z-score kind of averages about known values)."
) % (zscorethreshold, fp_error, fp_filesizes))
else:
fig.savefig(fp_output, bbox_inches='tight')
def report_exome_coverage(
fps_sample, fp_plot,
min_coverage=30, min_targets=80, coverage_cutoff=200):
"""Creates an exome coverage plot for multiple samples.
Parameters
----------
fps_sample : [str]
A list of file-paths with coverage data in csv format.
fp_plot : str
Filepath of output graph.
min_coverage : int
Default: 30.
        An arbitrary threshold of minimal coverage that we expect.
A vertical dashed line is drawn at this value.
min_targets : float
Default: 80.
        An arbitrary threshold of minimal targets that we expect to be covered.
A horizontal dashed line is drawn at this value.
coverage_cutoff : float
Default: 200.
Rightmost coverage cut-off value where X-axis is limited.
Raises
------
ValueError : If one of the sample's coverage falls below expected
thresholds.
"""
# Usually we aim for a 30X coverage on 80% of the sites.
fig, ax = plt.subplots()
ax.axhline(y=min_targets, xmin=0, xmax=coverage_cutoff, color='gray',
linestyle='--')
ax.axvline(x=min_coverage, ymin=0, ymax=100, color='gray', linestyle='--')
samples_below_coverage_threshold = []
for fp_sample in fps_sample:
coverage = pd.read_csv(fp_sample, sep="\t")
samplename = fp_sample.split('/')[-1].split('.')[0]
linewidth = 1
if coverage[coverage['#coverage'] == min_coverage]['percent_cumulative'].min() < min_targets:
linewidth = 4
samples_below_coverage_threshold.append(samplename)
ax.plot(coverage['#coverage'],
coverage['percent_cumulative'],
label=samplename,
linewidth=linewidth)
ax.set_xlim((0, coverage_cutoff))
ax.set_xlabel('Read Coverage')
ax.set_ylabel('Targeted Exome Bases')
ax.legend()
if len(samples_below_coverage_threshold) > 0:
fp_plot = fp_plot.replace('.pdf', '.error.pdf')
fig.savefig(fp_plot, bbox_inches='tight')
if len(samples_below_coverage_threshold) > 0:
raise ValueError(
"The following %i sample(s) have coverage below expected "
"thresholds. Please discuss with project PIs on how to proceed. "
"Maybe, samples need to be re-sequenced.\n\t%s\nYou will find more"
" information in the generated coverage plot '%s'." % (
len(samples_below_coverage_threshold),
'\n\t'.join(samples_below_coverage_threshold),
fp_plot))
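# Usage sketch (file paths are illustrative): aggregate per-sample coverage curves into one plot
# and raise if any sample covers fewer than 80% of targets at 30X.
#     report_exome_coverage(glob('Coverage/*.exome_coverage.csv'), 'coverage.pdf',
#                           min_coverage=30, min_targets=80)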
ACTION_PROGRAMS = [
{'action': 'background',
'program': 'GATK',
'fileending_snupy_extract': '.snp_indel.gatk',
'fileending_spike_calls': '.gatk.snp_indel.vcf',
'stepname_spike_calls': 'gatk_CombineVariants',
},
{'action': 'background',
'program': 'Platypus',
'fileending_snupy_extract': '.indel.ptp',
'fileending_spike_calls': '.ptp.annotated.filtered.indels.vcf',
'stepname_spike_calls': 'platypus_filtered',
},
{'action': 'tumornormal',
'program': 'Varscan',
'fileending_snupy_extract': '.somatic.varscan',
'fileending_spike_calls':
{'homo sapiens': '.snp.somatic_germline.vcf',
'mus musculus': '.indel_snp.vcf'},
'stepname_spike_calls': 'merge_somatic',
},
{'action': 'tumornormal',
'program': 'Mutect',
'fileending_snupy_extract': '.somatic.mutect',
'fileending_spike_calls': '.all_calls.vcf',
'stepname_spike_calls': 'mutect',
},
{'action': 'tumornormal',
'program': 'Excavator2',
'fileending_snupy_extract': '.somatic.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_somatic',
},
{'action': 'trio',
'program': 'Varscan\ndenovo',
'fileending_snupy_extract': '.denovo.varscan',
'fileending_spike_calls': '.var2denovo.vcf',
'stepname_spike_calls': 'writing_headers',
},
{'action': 'trio',
'program': 'Excavator2',
'fileending_snupy_extract': '.trio.cnv.excavator2',
'fileending_spike_calls': '.vcf',
'stepname_spike_calls': 'excavator_trio',
},
]
def _get_statusdata_demultiplex(samplesheets, prefix, config):
demux_yields = []
for flowcell in samplesheets['run'].unique():
fp_yielddata = '%s%s%s/Data/%s.yield_data.csv' % (prefix, config['dirs']['intermediate'], config['stepnames']['yield_report'], flowcell)
if exists(fp_yielddata):
demux_yields.append(
pd.read_csv(fp_yielddata, sep="\t").rename(columns={'Project': 'Sample_Project', 'Sample': 'Sample_ID', 'Yield': 'yield'})) #.set_index(['Project', 'Lane', 'Sample', 'Barcode sequence'])
if len(demux_yields) <= 0:
return pd.DataFrame()
demux_yields = add_aliassamples(pd.concat(demux_yields, axis=0), config)
# map yields of original sampels to aliases
for idx, row in demux_yields[demux_yields['is_alias'] == True].iterrows():
orig = demux_yields[(demux_yields['Sample_Project'] == row['fastq-prefix'].split('/')[0]) & (demux_yields['Sample_ID'] == row['fastq-prefix'].split('/')[1])]['yield']
if orig.shape[0] > 0:
demux_yields.loc[idx, 'yield'] = orig.sum()
demux_yields = demux_yields.dropna(subset=['yield'])
return pd.DataFrame(demux_yields).groupby(['Sample_Project', 'Sample_ID'])['yield'].sum()
def _get_statusdata_coverage(samplesheets, prefix, config, min_targets=80):
coverages = []
for (sample_project, sample_id), meta in samplesheets.groupby(['Sample_Project', 'Sample_ID']):
role_sample_project, role_sample_id = sample_project, sample_id
if (meta['is_alias'] == True).any():
role_sample_project, role_sample_id = get_role(sample_project, meta['spike_entity_id'].unique()[0], meta['spike_entity_role'].unique()[0], samplesheets).split('/')
fp_coverage = join(prefix, config['dirs']['intermediate'], config['stepnames']['exome_coverage'], role_sample_project, '%s.exome_coverage.csv' % role_sample_id)
if exists(fp_coverage):
coverage = pd.read_csv(fp_coverage, sep="\t")
if coverage.shape[0] > 0:
coverages.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'coverage': coverage.loc[coverage['percent_cumulative'].apply(lambda x: abs(x-min_targets)).idxmin(), '#coverage']})
if len(coverages) <= 0:
return pd.DataFrame()
return pd.DataFrame(coverages).set_index(['Sample_Project', 'Sample_ID'])['coverage']
def _isKnownDuo(sample_project, spike_entity_id, config):
"""Checks if trio is a known duo, i.e. missing samples won't be available in the future.
Parameters
----------
sample_project : str
spike_entity_id : str
config : dict()
Snakemake configuration.
Returns
-------
Boolean: True, if spike_entity_id is in config list of known duos for given project.
False, otherwise.
"""
if 'projects' in config:
if sample_project in config['projects']:
if 'known_duos' in config['projects'][sample_project]:
if spike_entity_id in config['projects'][sample_project]['known_duos']:
return True
return False
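# Sketch of the config layout _isKnownDuo expects (project and entity names are illustrative):
#     config = {'projects': {'SomeProject': {'known_duos': ['family_017']}}}
#     _isKnownDuo('SomeProject', 'family_017', config)  # -> True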
def _get_statusdata_snupyextracted(samplesheets, prefix, snupy_instance, config):
results = []
for sample_project, meta in samplesheets.groupby('Sample_Project'):
# project in config file is not properly configure for snupy!
if config['projects'].get(sample_project, None) is None:
continue
if config['projects'][sample_project].get('snupy', None) is None:
continue
if config['projects'][sample_project]['snupy'][snupy_instance].get('project_id', None) is None:
continue
r = requests.get('%s/experiments/%s.json' % (config['credentials']['snupy'][snupy_instance]['host'], config['projects'][sample_project]['snupy'][snupy_instance]['project_id']),
auth=HTTPBasicAuth(config['credentials']['snupy'][snupy_instance]['username'], config['credentials']['snupy'][snupy_instance]['password']),
verify=False)
check_snupy_status(r)
samples = [sample['name'] for sample in r.json()['samples']]
for sample_id, meta_sample in meta.groupby('Sample_ID'):
for file_ending, action, program in [(ap['fileending_snupy_extract'], ap['action'], ap['program']) for ap in ACTION_PROGRAMS]:
                # in some cases "sample name" holds the spike_entity_id, in others the Sample_ID
entity = sample_id
runs = '+'.join(sorted(meta_sample['run'].unique()))
if (action == 'trio'):
if meta_sample['spike_entity_role'].unique()[0] == 'patient':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
if (action == 'tumornormal'):
if meta_sample['spike_entity_role'].unique()[0] == 'tumor':
entity = meta_sample['spike_entity_id'].iloc[0]
runs = '+'.join(sorted(samplesheets[samplesheets['spike_entity_id'] == meta_sample['spike_entity_id'].iloc[0]]['run'].unique()))
name = '%s_%s/%s%s' % (runs, sample_project, entity, file_ending)
if (sample_project in config['projects']) and (pd.notnull(meta_sample['spike_entity_role'].iloc[0])):
if ((action == 'trio') and (meta_sample['spike_entity_role'].iloc[0] in ['patient', 'sibling']) and (not _isKnownDuo(sample_project, meta_sample['spike_entity_id'].iloc[0], config))) or\
((action == 'background')) or\
((action == 'tumornormal') and (meta_sample['spike_entity_role'].iloc[0].startswith('tumor'))):
results.append({
'Sample_Project': sample_project,
'Sample_ID': sample_id,
'action': action,
'program': program,
'status': name in samples,
'snupy_sample_name': name
})
if len(results) <= 0:
return pd.DataFrame()
return pd.DataFrame(results).set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])
def _get_statusdata_numberpassingcalls(samplesheets, prefix, config, RESULT_NOT_PRESENT, verbose=sys.stderr):
results = []
# leave out samples aliases
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] != True].fillna('not defined').groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
def _get_fileending(file_ending, fastq_prefix, samplesheets, config):
if isinstance(file_ending, dict):
return file_ending[get_species(fastq_prefix, samplesheets, config)]
else:
return file_ending
for ap in ACTION_PROGRAMS:
fp_vcf = None
if (ap['action'] == 'background') and pd.notnull(spike_entity_role):
if (ap['program'] == 'GATK'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Platypus'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'tumornormal'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].apply(lambda x: x.split('_')[0] if pd.notnull(x) else x).isin(['tumor']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID']):
# for Keimbahn, the tumor sample needs to include the name of the original sample ID
instance_id = '%s/%s' % (alias_sample_project, alias_sample_id)
if alias_spike_entity_role == 'tumor':
# for Maus_Hauer, the filename holds the entity name, but not the Sample ID
instance_id = '%s/%s' % (sample_project, spike_entity_id)
if (alias_spike_entity_role.split('_')[0] in set(['tumor'])):
if (ap['program'] == 'Varscan'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Mutect'):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], instance_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['action'] == 'trio'):
for (alias_sample_project, alias_spike_entity_role, alias_sample_id, alias_spike_entity_id), alias_meta in samplesheets[(samplesheets['fastq-prefix'] == fastq_prefix) & (samplesheets['spike_entity_role'].isin(['patient', 'sibling']))].groupby(['Sample_Project', 'spike_entity_role', 'Sample_ID', 'spike_entity_id']):
# Trios are a more complicated case, since by default the result name is given by the
# spike_entity_id, but if computed for siblings, the name is given by the fastq-prefix
if (ap['program'] == 'Varscan\ndenovo'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (ap['program'] == 'Excavator2'):
if (alias_spike_entity_role in set(['patient'])):
fp_vcf = '%s%s%s/%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], alias_sample_project, alias_spike_entity_id, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
elif (alias_spike_entity_role in set(['sibling'])):
fp_vcf = '%s%s%s/%s/Results/%s/EXCAVATORRegionCall_%s%s' % (prefix, config['dirs']['intermediate'], config['stepnames'][ap['stepname_spike_calls']], fastq_prefix, fastq_prefix.split('/')[-1], fastq_prefix.split('/')[-1], _get_fileending(ap['fileending_spike_calls'], fastq_prefix, meta, config))
# remove entry, if it is known (config.yaml) that this trio is incomplete
if (spike_entity_role == 'patient') and (spike_entity_id in config.get('projects', []).get(sample_project, []).get('known_duos', [])):
fp_vcf = None
results.append({
'Sample_Project': sample_project,
'Sample_ID': fastq_prefix.split('/')[-1],
'action': ap['action'],
'program': ap['program'],
'fp_calls': fp_vcf,
})
status = 0
num_status = 20
if verbose is not None:
print('of %i: ' % num_status, file=verbose, end="")
for i, res in enumerate(results):
if (verbose is not None) and int(i % (len(results) / num_status)) == 0:
status+=1
print('%i ' % status, file=verbose, end="")
nr_calls = RESULT_NOT_PRESENT
if (res['fp_calls'] is not None) and exists(res['fp_calls']):
try:
if res['program'] == 'Varscan':
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[7], squeeze=True).apply(lambda x: ';SS=2;' in x).sum()
else:
nr_calls = pd.read_csv(res['fp_calls'], comment='#', sep="\t", dtype=str, header=None, usecols=[6], squeeze=True).value_counts()['PASS']
except pd.io.common.EmptyDataError:
nr_calls = 0
res['number_calls'] = nr_calls
if verbose is not None:
print('done.', file=verbose)
if len(results) <= 0:
return pd.DataFrame()
results = pd.DataFrame(results)
results = results[pd.notnull(results['fp_calls'])].set_index(['Sample_Project', 'Sample_ID', 'action', 'program'])['number_calls']
# add alias sample results
for (sample_project, spike_entity_id, spike_entity_role, fastq_prefix), meta in samplesheets[samplesheets['is_alias'] == True].groupby(['Sample_Project', 'spike_entity_id', 'spike_entity_role', 'fastq-prefix']):
for (_, _, action, program), row in results.loc[fastq_prefix.split('/')[0], fastq_prefix.split('/')[-1], :].iteritems():
results.loc[sample_project, meta['Sample_ID'].unique()[0], action, program] = row
# remove samples, that don't have their own role, but were used for aliases
    for (sample_project, sample_id), _ in samplesheets[pd.isnull(samplesheets['spike_entity_role'])
import math
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from bcns import Durations, sim, Simulator, SimulatorCoordinated
from bcns.sim import Equ_LatD, Equ_pooled_LatD, Exp_LatD, Exp_pooled_LatD
def distance_between_2_points(a: tuple, b: tuple) -> float:
x1, y1 = a
x2, y2 = b
return round(math.sqrt((x2 - x1)**2 + (y2 - y1)**2), 6)
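# Example: a 3-4-5 right triangle, rounded to 6 decimals.
#     distance_between_2_points((0, 0), (3, 4))  # -> 5.0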
def prepare_test_centrality_lat_mat_baseline(nodes):
return Equ_LatD(3, 1, 0).tolist()
def prepare_test_centrality_lat_mat_1(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = np.array([0, 1, 2]) / 2
M2_lat = np.array([1, 0, 1]) / 2
M3_lat = np.array([2, 1, 0]) / 2
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
            lat_mat[n]['M1'] = lat_mat['M1'][n]
            lat_mat[n]['M2'] = lat_mat['M2'][n]
            lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare_test_centrality_lat_mat_2(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = np.array([0, 1, 4]) / 4
M2_lat = np.array([1, 0, 3]) / 4
M3_lat = np.array([4, 3, 0]) / 4
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
            lat_mat[n]['M1'] = lat_mat['M1'][n]
            lat_mat[n]['M2'] = lat_mat['M2'][n]
            lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare2_lat_mat_asymmetric(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = (np.array([1, 0, 1, 2, 3, 4, 5, 4]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
    for n in nodes:
        if n not in ('M1', 'M2'):
            lat_mat[n]['M1'] = lat_mat['M1'][n]
            lat_mat[n]['M2'] = lat_mat['M2'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i < j:
lat_mat[i][j] = lat_mat[i][j] * 100
return lat_mat
def prepare2_lat_mat(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
# 'OLM1', 'M1', 'OM1', 'OM', 'OM2', 'M2', 'ORM2', 'OEQ'
OLM1_lat = (np.array([0, 1, 2, 3, 4, 5, 6, 4.5825]) / 4)
M1_lat = (np.array([1, 0, 1, 2, 3, 4, 5, 4]) / 4)
ORM1_lat = (np.array([2, 1, 0, 1, 2, 3, 4, 4.583]) / 4)
OM_lat = (np.array([3, 2, 1, 0, 1, 2, 3, 4.472135955])/4)
OLM2_lat = (np.array([4, 3, 2, 1, 0, 1, 2, 4.583]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
ORM2_lat = (np.array([6, 5, 4, 3, 2, 1, 0, 4.5825])/4)
lm1 = (-1, 0)
m1 = (0, 0)
rm1 = (1, 0)
cm12 = (2, 0)
lm2 = (3, 0)
m2 = (4, 0)
rm2 = (5, 0)
m3 = (2, math.sqrt(12))
OEQ_lat = (np.array([distance_between_2_points(lm1, m3),
4,
distance_between_2_points(rm1, m3),
distance_between_2_points(cm12, m3),
distance_between_2_points(m3, lm2),
4,
distance_between_2_points(m3, rm2),
0]) / 4)
lat_mat = [OLM1_lat, M1_lat, ORM1_lat, OM_lat,
OLM2_lat, M2_lat, ORM2_lat, OEQ_lat]
lat_mat = list(map(lambda x: x.tolist(), lat_mat))
return lat_mat
def prepare1_coordinators_lat_mat_proportional(proportion):
C_lat = [0, proportion]
M1_lat = [proportion, 0]
lat_mat = [C_lat, M1_lat]
return lat_mat
def prepare1f_coordinators_lat_mat_proportional(proportion):
C_lat = [0, 0.5, 0.5 + proportion]
M1_lat = [0.5, 0, float('inf')]
M2_lat = [0.5 + proportion, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_proportional(proportion):
C_lat = [0, proportion * 1, (1-proportion) * 1]
M1_lat = [proportion * 1, 0, float('inf')]
M2_lat = [(1-proportion) * 1, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_proportional_M1_Farther(proportion, factor):
C_lat = [0, proportion * factor, (1-proportion) * 1]
M1_lat = [proportion * factor, 0, float('inf')]
M2_lat = [(1-proportion) * 1, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare3_coordinators_lat_mat_proportional(proportion):
m1 = (0,0)
m2 = (1,0)
m3 = (0.5, math.sqrt(0.75))
cp = (0.5, math.sqrt(0.75)-proportion)
C_lat = [0, distance_between_2_points(cp, m1), distance_between_2_points(cp, m2), distance_between_2_points(cp, m3)]
M1_lat = [distance_between_2_points(cp, m1), 0, float('inf'), float('inf')]
M2_lat = [distance_between_2_points(cp, m2), float('inf'), 0, float('inf')]
M3_lat = [distance_between_2_points(cp, m3), float('inf'), float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat, M3_lat]
return lat_mat
def prepare4_p2p_lat_mat_proportional(proportion):
m1 = (0,1)
m2 = (1,1)
m3 = (1,0)
m4 = (0,0)
M1_lat = [0, 1, 1.41421, 1]
M2_lat = [1, 0, 1, 1.41421]
M3_lat = [1.41421, 1, 0, 1]
M4_lat = [1, 1.41421, 1, 0]
lat_mat = [M1_lat, M2_lat, M3_lat, M4_lat]
return lat_mat
def prepare4_coordinators_lat_mat_proportional(proportion):
m1 = (0, 1)
m2 = (1, 1)
m3 = (1, 0)
m4 = (0, 0)
cp = (1-proportion, 1-proportion)
C_lat = [0,
distance_between_2_points(cp, m1),
distance_between_2_points(cp, m2),
distance_between_2_points(cp, m3),
distance_between_2_points(cp, m4)]
M1_lat = [distance_between_2_points(cp, m1), 0, float('inf'), float('inf'), float('inf')]
M2_lat = [distance_between_2_points(cp, m2), float('inf'), 0, float('inf'), float('inf')]
M3_lat = [distance_between_2_points(cp, m3), float('inf'), float('inf'), 0, float('inf')]
M4_lat = [distance_between_2_points(cp, m4), float('inf'), float('inf'), float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat, M3_lat, M4_lat]
return lat_mat
def prepare2_coordinators_lat_mat_middle():
C_lat = [0, .5, .5]
M1_lat = [.5, 0, float('inf')]
M2_lat = [.5, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_near_weaker():
C_lat = [0, 0.1, 0.9]
M1_lat = [0.1, 0, float('inf')]
M2_lat = [0.9, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_near_stronger():
C_lat = [0, 0.9, 0.1]
M1_lat = [0.9, 0, float('inf')]
M2_lat = [0.1, float('inf'), 0]
lat_mat = [C_lat, M1_lat, M2_lat]
return lat_mat
def prepare2_coordinators_lat_mat_no_relay(nodes):
M1_lat = (np.array([0, 1, 2]))
C_lat = (np.array([1, 0, 1]) / 1000)
M2_lat = (np.array([2, 1, 0]))
lat_mat = [M1_lat, C_lat, M2_lat]
lat_mat = list(map(lambda x: x.tolist(), lat_mat))
return lat_mat
def prepare3_lat_mat_farther(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = np.array([1, 0, 1, 2, 3, 4, 5, 4*10]) / 4
M2_lat = np.array([5, 4, 3, 2, 1, 0, 1, 4*10]) / 4
M3_lat = np.array([11, 10, 9, 8, 9, 10, 11, 0])
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
            lat_mat[n]['M1'] = lat_mat['M1'][n]
            lat_mat[n]['M2'] = lat_mat['M2'][n]
            lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare3_lat_mat_fixed_asymetric(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = (np.array([1, 0, 1, 2, 3, 400, 5, 4]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
M3_lat = (
np.array([4.5825, 4, 4.583, 4.472135955, 4.583, 400, 4.5825, 0]) / 4)
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
            lat_mat[n]['M1'] = lat_mat['M1'][n]
            lat_mat[n]['M2'] = lat_mat['M2'][n]
            lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare3_lat_mat(nodes):
lat_mat = OrderedDict.fromkeys(nodes)
M1_lat = (np.array([1, 0, 1, 2, 3, 4, 5, 4]) / 4)
M2_lat = (np.array([5, 4, 3, 2, 1, 0, 1, 4]) / 4)
##Coordinates:
lm1 = (-1, 0)
m1 = (0,0)
rm1 = (1,0)
cm12 = (2,0)
lm2 = (3,0)
m2 = (4,0)
rm2 = (5,0)
m3 = (2, math.sqrt(12))
M3_lat = (np.array([distance_between_2_points(lm1, m3),
4,
distance_between_2_points(rm1, m3),
distance_between_2_points(cm12, m3),
distance_between_2_points(m3, lm2),
4,
distance_between_2_points(m3, rm2),
0]) / 4)
#print(M3_lat)
latencies = [float('0')] * len(nodes)
for n in nodes:
lat_mat[n] = dict(zip(nodes, latencies))
        if n == 'M1':
            lat_mat[n] = dict(zip(nodes, M1_lat))
        if n == 'M2':
            lat_mat[n] = dict(zip(nodes, M2_lat))
        if n == 'M3':
            lat_mat[n] = dict(zip(nodes, M3_lat))
    for n in nodes:
        if n not in ('M1', 'M2', 'M3'):
            lat_mat[n]['M1'] = lat_mat['M1'][n]
            lat_mat[n]['M2'] = lat_mat['M2'][n]
            lat_mat[n]['M3'] = lat_mat['M3'][n]
lat_mat = [[lat_mat[i][j] for i in nodes] for j in nodes]
return lat_mat
def prepare5_lat_mat_fixed(nodes):
#self.NODES_IDS = ['WA-US', 'SI-CN', 'RE-IS', 'LI-CH', 'MO-RU']
'''# <location_1> <lat_1> <lng_1> <location_2> <lat_2> <lng_2> <dist. (in km)> <latency (in ms)>
WASHINGTON-DC-US 38.9047 -77.0164 SICHUAN-NA-CN 30.1333 102.9333 12338.40 197.41
WASHINGTON-DC-US 38.9047 -77.0164 REYKJAVÍK-NA-IS 64.1333 -21.9333 4512.89 72.21
WASHINGTON-DC-US 38.9047 -77.0164 LINTHAL-NA-CH 46.9167 9.0000 6703.91 107.26
WASHINGTON-DC-US 38.9047 -77.0164 MOSCOW-NA-RU 55.7500 37.6167 7820.54 125.13
SICHUAN-NA-CN 30.1333 102.9333 REYKJAVÍK-NA-IS 64.1333 -21.9333 8489.56 135.83
SICHUAN-NA-CN 30.1333 102.9333 LINTHAL-NA-CH 46.9167 9.0000 7891.06 126.26
SICHUAN-NA-CN 30.1333 102.9333 MOSCOW-NA-RU 55.7500 37.6167 5761.37 92.18
REYKJAVÍK-NA-IS 64.1333 -21.9333 LINTHAL-NA-CH 46.9167 9.0000 2680.24 42.88
REYKJAVÍK-NA-IS 64.1333 -21.9333 MOSCOW-NA-RU 55.7500 37.6167 3307.89 52.93
    LINTHAL-NA-CH 46.9167 9.0000 MOSCOW-NA-RU 55.7500 37.6167 2196.05 35.14
'''
# ['WA-US', 'SI-CN', 'RE-IS', 'LI-CH', 'MO-RU']
WA_lat = np.array([0, 197.41, 72.21, 107.26, 125.13])/ (1000*1.5)
SI_lat = np.array([-1, 0, 135.83, 126.26, 92.18])/ (1000*1.5)
RE_lat = np.array([-1, -1, 0, 42.88, 52.93])/ (1000*1.5)
LI_lat = np.array([-1, -1, -1, 0, 35.14])/ (1000*1.5)
MO_lat = np.array([-1, -1, -1, -1, 0])/ (1000*1.5)
lat_mat = [WA_lat, SI_lat, RE_lat, LI_lat, MO_lat]
for i in range(len(lat_mat)):
for j in range(len(lat_mat)):
if i > j:
lat_mat[i][j] = lat_mat[j][i]
return lat_mat
def prepare100_lat_mat_fixed_centrality(nodes):
latencies = pd.read_csv('evaluation/100_cities.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]] / (1000*1.5))
return lat_mat
def prepare240_lat_mat_fixed_capital_centrality(nodes):
latencies = pd.read_csv(
'evaluation/cities_capitals_lat_lng_latency.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]]/(1000*1.5))
return lat_mat
def prepare15_lat_mat_ls_fixed_capital_centrality(nodes):
latencies = pd.read_csv(
'evaluation/cities_capitals_lat_lng_latency.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]] / (1000*3.2))
return lat_mat
def prepare240_lat_mat_cs_fixed_capital_centrality(nodes):
latencies = pd.read_csv(
'evaluation/cities_capitals_lat_lng_latency.txt', delim_whitespace=True)
lat_dict = defaultdict(dict)
for i in range(len(latencies)):
row = latencies.iloc[i]
lat_dict[row['location_1']][row['location_2']] = row['latency_ms']
lat_dict[row['location_2']][row['location_1']] = row['latency_ms']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
lat_mat[i][j] = (lat_dict[nodes[i]][nodes[j]] / (1000*3.2*1.5))
return lat_mat
def prepare15_lat_mat_fixed(nodes):
# nodes= ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
# 'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
# 'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
# 'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
latencies = pd.read_csv('evaluation/adjlst-2.txt', delim_whitespace=True)
nodes = ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
f1 = latencies[(latencies['location_1'] == nodes[i])
& (latencies['location_2'] == nodes[j])]
if len(f1) == 0:
f2 = latencies[(latencies['location_2'] == nodes[i])
& (latencies['location_1'] == nodes[j])]
result = f2['latency_ms'].iloc[0]
else:
result = f1['latency_ms'].iloc[0]
lat_mat[i][j] = (result/(1000*1.5))
return lat_mat
def prepare15_ls_lat_mat_fixed(nodes):
# nodes= ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
# 'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
# 'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
# 'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
latencies = pd.read_csv('evaluation/adjlst-2.txt', delim_whitespace=True)
nodes = ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
f1 = latencies[(latencies['location_1'] == nodes[i])
& (latencies['location_2'] == nodes[j])]
if len(f1) == 0:
f2 = latencies[(latencies['location_2'] == nodes[i])
& (latencies['location_1'] == nodes[j])]
result = f2['latency_ms'].iloc[0]
else:
result = f1['latency_ms'].iloc[0]
lat_mat[i][j] = (result/(1000*3.2))
return lat_mat
def prepare15_cs_lat_mat_fixed(nodes):
# nodes= ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
# 'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
# 'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
# 'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
latencies = pd.read_csv('evaluation/adjlst-2.txt', delim_whitespace=True)
nodes = ['WASHINGTON-DC-US', 'SICHUAN-NA-CN', 'REYKJAVÍK-NA-IS',
'LINTHAL-NA-CH', 'MOSCOW-NA-RU', 'TBILISI-NA-GE', 'KIEV-NA-UK',
'ANKARA-NA-TR', 'SKOPJE-NA-MK', 'HELSINKI-NA-FI', 'MANNHEIM-BW-DE',
'SINGAPORE-NA-SG', 'ASHBURN-VA-US', 'FRANKFURT-HE-DE', 'NUREMBURG-BV-DE']
lat_mat = [[float('0') for i in nodes] for j in nodes]
for i in range(len(nodes)):
for j in range(len(nodes)):
if i != j:
f1 = latencies[(latencies['location_1'] == nodes[i])
& (latencies['location_2'] == nodes[j])]
if len(f1) == 0:
f2 = latencies[(latencies['location_2'] == nodes[i])
& (latencies['location_1'] == nodes[j])]
result = f2['latency_ms'].iloc[0]
else:
result = f1['latency_ms'].iloc[0]
lat_mat[i][j] = (result/(1000*3.2*1.5))
return lat_mat
def to_dataframe_prepare_test_centrality_lat_mat_baseline(experiments_stats, nodes_ids):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[0]}, {x[1]}, {x[2]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[0]}, {x[1]}, {x[2]}]")
miner_df.id = miner_df.id.map(dict(zip(range(0, 3), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe2(experiments_stats, nodes_ids, nodes_count=2):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[1]}, {x[5]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[1]}, {x[5]}]")
miner_df.id = miner_df.id.map(dict(zip(range(0, 8), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe1_coordinators(experiments_stats, nodes_ids, nodes_count=2):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[0]}, {x[1]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[0]}, {x[1]}]")
miner_df.id = miner_df.id.map(
dict(zip(range(0, len(nodes_ids)), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe2_coordinators(experiments_stats, nodes_ids, nodes_count=3):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[1]}, {x[2]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[1]}, {x[2]}]")
miner_df.id = miner_df.id.map(
dict(zip(range(0, len(nodes_ids)), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe3_coordinators(experiments_stats, nodes_ids, nodes_count=3):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = pd.concat(miner_df)
df.drop(columns=['miners'], inplace=True)
df.hpd = df.hpd.apply(lambda x: f"[{x[1]}, {x[2]}, {x[3]}]")
miner_df.global_hpd = miner_df.global_hpd.apply(
lambda x: f"[{x[1]}, {x[2]}, {x[3]}]")
miner_df.id = miner_df.id.map(
dict(zip(range(0, len(nodes_ids)), nodes_ids)))
return {'miner': miner_df, 'global': df}
def to_dataframe4(experiments_stats, nodes_ids, nodes_count=3):
df = pd.DataFrame(experiments_stats)
miner_df = list()
for miner in df.miners:
miner_df.append(pd.DataFrame(miner))
miner_df = | pd.concat(miner_df) | pandas.concat |
import pandas as pd
dataframe = pd.read_csv("C:\\bank-additional-full.csv", sep=";")
cols =['job', 'marital', 'education', 'default', 'housing', 'loan', 'contact', 'month', 'day_of_week', 'poutcome']
data_1 = dataframe[cols]
data_dummies = pd.get_dummies(data_1)
result_df = | pd.concat([data_dummies, dataframe], axis=1) | pandas.concat |
"""
This code generates features based on the topics of the papers that an author has written.
It takes two files:
- paper_embeddings_64.txt
- author_papers.txt
We then apply clustering to the paper embeddings so that papers with a similar topic are grouped together (before doing so, we first lower the dimensionality of the embeddings, as many clustering algorithms handle high dimensionality poorly).
After clustering the papers, we give a label to each cluster, so each label represents one topic.
Next, for each author, we generate features based on the topics of their papers.
This code will output one file:
- author_topics_stats.csv
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import umap
import ast
import re
from sklearn.cluster import Birch
import scipy.stats as stat
###########################Predict the topic of each paper#########################################################
#load papers embeddings in order to do clustring
f = open("../data/paper_embeddings_64.txt","r")
papers = {}
s = ""
pattern = re.compile(r'(\s){2,}')
for l in f:
if(":" in l and s!=""):
papers[s.split(":")[0]] = np.array(ast.literal_eval(re.sub(pattern, ',', s.split(":")[1]).replace(" ",",")))
s = l.replace("\n","")
else:
s = s+" "+l.replace("\n","")
# flush the last accumulated entry, which the loop above never writes out
if s != "":
    papers[s.split(":")[0]] = np.array(ast.literal_eval(re.sub(pattern, ',', s.split(":")[1]).replace(" ",",")))
f.close()
papers_id = list(papers.keys())
embeddings = list(papers.values())
#umap dimensionality reduction
umap_embeddings = umap.UMAP(n_neighbors=15,
n_components=5,
metric='euclidean').fit_transform(embeddings)
#clustring using Birch algorithm
cluster = Birch(n_clusters=32).fit(umap_embeddings)
Topic_papers = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import regex
class Matrix:
@staticmethod
def compile_r1_passed(r1_passed: dict) -> dict:
r1_compiled = {header[1:41] : labels[0] + labels[1] + labels[2] + labels[3] \
for header, labels in r1_passed.items()}
return r1_compiled
@staticmethod
def merge_r1_map_info(r1_compiled: dict, r2_map_passed: dict) -> dict:
r1_map_info = {header : [r2_map_passed[header], labels] \
for header, labels in r1_compiled.items() \
if header in r1_compiled and header in r2_map_passed}
return r1_map_info
@staticmethod
def get_gene_symbols(bed: str) -> list:
gene_symbols = set()
with open(bed, 'r') as bedf:
for line in bedf:
*_, symbol = line.split('\t')
gene_symbols.add(symbol.rstrip('\n'))
gene_symbols = sorted(gene_symbols)
return gene_symbols
@staticmethod
def generate_rasd_matrix(r1_compiled: dict, r1_map_info: dict, gene_symbols: list) -> pd.DataFrame:
cell_index = set(r1_compiled.values())
rasd_df = | pd.DataFrame(index=cell_index, columns=gene_symbols) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# In[2]:
pd.set_option('display.max_columns', None)
np.set_printoptions(suppress=True)
df= | pd.read_csv("E:/Study/ML tuts/Case Studies/fifa/players_20_classification.csv") | pandas.read_csv |
import pandas as pd
import numpy as np
import talib
def load_data(ticker):
"""
"""
path_to_data = 'https://stooq.pl/q/d/l/?s={ticker}&i=d'.format(
ticker=ticker)
return pd.read_csv(path_to_data)
def train_test_split(X, y, test_size = 0.3):
"""
Returns data split into train and test parts.
The test part contains the last `test_size` fraction of the data, while the train
part contains the rest. Useful for splitting time series data, where we want to
predict on data that the model has never seen.
Keyword arguments:
X -- data frame or numpy array containing predictors
y -- dataframe or numpy array containing predicted values
test_size -- fraction of data taken into the test sample
"""
assert len(X) == len(y), "X and y not the same size"
size = int((1 - test_size) * len(X))
X_train = X[:size]
X_test = X[size:]
y_train = y[:size].values.reshape(-1,1)
y_test = y[size:].values.reshape(-1,1)
return X_train, X_test, y_train, y_test
def prepare_data_from_stooq(df, to_prediction = False, return_days = 5):
"""
Prepares data in X, y format from a pandas dataframe downloaded from stooq.
y is created as the closing price `return_days` ahead minus the opening price
(divided by the opening price when preparing training data).
Keyword arguments:
df -- data frame containing data from stooq
to_prediction -- if True, keep the last `return_days` rows for prediction
return_days -- number of days ahead used to calculate y.
"""
if 'Wolumen' in df.columns:
df = df.drop(['Data', 'Wolumen', 'LOP'], axis=1)
else:
df = df.drop('Data', axis = 1)
y = df['Zamkniecie'].shift(-return_days) - df['Otwarcie']
if not to_prediction:
df = df.iloc[:-return_days,:]
y = y[:-return_days]/df['Otwarcie']
return df.values, y
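# Hedged usage sketch (not part of the original module): chaining the helpers above.
# The 'wig20' ticker is only an illustrative stooq symbol; availability of the data
# and its Polish column names ('Otwarcie', 'Zamkniecie', ...) are assumptions here.
# df = load_data('wig20')
# X, y = prepare_data_from_stooq(df, return_days=5)
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)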
def add_technical_features(X, y, return_array = False):
"""
Adds basic technical features used in paper:
"https://arxiv.org/pdf/1706.00948.pdf" using library talib.
Keyword arguments:
X -- numpy array or dataframe containing predictors where cols:
#0 - open
#1 - High
#2 - Low
#3 - Close
y -- vector of returns.
"""
k, dfast = talib.STOCH(X[:,1],X[:,2],X[:,3])
X = np.hstack((X, k.reshape(-1,1)))
X = np.hstack((X, dfast.reshape(-1,1)))
X = np.hstack((X, talib.SMA(dfast, timeperiod=5).reshape(-1,1)))
X = np.hstack((X, talib.MOM(X[:,3], timeperiod=4).reshape(-1,1)))
X = np.hstack((X, talib.ROC(X[:,3], timeperiod=5).reshape(-1,1)))
X = np.hstack((X, talib.WILLR(X[:,1], X[:,2], X[:,3],
timeperiod=5).reshape(-1,1)))
X = np.hstack((X, (X[:,3] / talib.SMA(X[:,3], timeperiod=5)).reshape(-1,1)))
X = np.hstack((X, (X[:,3] / talib.SMA(X[:,3], timeperiod=10)).reshape(-1,1)))
X = np.hstack((X, talib.RSI(X[:,3]).reshape(-1,1)))
X = np.hstack((X, talib.CCI(X[:,1], X[:,2], X[:,3],
timeperiod=14).reshape(-1,1)))
y = y[~np.isnan(X).any(axis = 1)]
X = X[~np.isnan(X).any(axis = 1)]
if return_array:
return X, y
else:
colnames = ['open','high','low','close','stoch_k', 'stoch_d', 'SMA_5', 'mom', 'roc', 'willr', 'disp_5','disp_10','rsi','cci']
return | pd.DataFrame(X, columns=colnames) | pandas.DataFrame |
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
import shap
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
# from .utils import Boba_Utils as u
class Boba_Model_Diagnostics():
def __init__(self):
pass
def run_model_diagnostics(self, model, X_train, X_test, y_train, y_test, target):
self.get_model_stats(model, X_train, X_test, y_train, y_test, target)
self.plot_shap_imp(model,X_train)
self.plot_shap_bar(model,X_train)
self.residual_plot(model,X_test,y_test,target)
self.residual_density_plot(model,X_test,y_test,target)
self.identify_outliers(model, X_test, y_test,target)
self.residual_mean_plot(model,X_test,y_test,target)
self.residual_variance_plot(model,X_test,y_test,target)
self.PVA_plot(model,X_test,y_test,target)
self.inverse_PVA_plot(model,X_train,y_train,target)
self.estimates_by_var(model,X_train,y_train,target,'Age')
self.error_by_var(model,X_train,y_train,target,'Age')
self.volatility_by_var(model,X_train,y_train,target,'Age')
def get_model_stats(self, model, X_train, X_test, y_train, y_test, target):
train_pred = model.predict(X_train)
test_pred = model.predict(X_test)
test_RMSE = np.sqrt(mean_squared_error(y_test, test_pred))
test_R2 = model.score(X_test,y_test)
test_MAE = mean_absolute_error(y_test, test_pred)
train_RMSE = np.sqrt(mean_squared_error(y_train, train_pred))
train_R2 = model.score(X_train,y_train)
train_MAE = mean_absolute_error(y_train, train_pred)
df = pd.DataFrame(data = {'RMSE': np.round(train_RMSE,4),
'R^2': np.round(train_R2,4),
'MAE': np.round(train_MAE,4)}, index = ['train'])
df2 = pd.DataFrame(data = {'RMSE': np.round(test_RMSE,4),
'R^2': np.round(test_R2,4),
'MAE': np.round(test_MAE,4)}, index = ['test'])
print("Model Statistics for {}".format(target))
print('-'*40)
print(df)
print('-'*40)
print(df2)
print('-'*40)
def plot_shap_imp(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train)
plt.show()
def plot_shap_bar(self,model,X_train):
shap_values = shap.TreeExplainer(model).shap_values(X_train)
shap.summary_plot(shap_values, X_train, plot_type='bar')
plt.show()
def feature_imp(self,model,X_train,target):
sns.set_style('darkgrid')
names = X_train.columns
coef_df = pd.DataFrame({"Feature": names, "Importance": model.feature_importances_},
columns=["Feature", "Importance"])
coef_df = coef_df.sort_values('Importance',ascending=False)
coef_df
fig, ax = plt.subplots()
sns.barplot(x="Importance", y="Feature", data=coef_df.head(20),
label="Importance", color="b",orient='h')
plt.title("XGB Feature Importances for {}".format(target))
plt.show()
def residual_plot(self,model, X_test, y_test,target):
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
fig, ax = plt.subplots()
ax.scatter(pred, residuals)
ax.plot([pred.min(), pred.max()], [0, 0], 'k--', lw=4)
ax.set_xlabel('Predicted')
ax.set_ylabel('Residuals')
plt.title("Residual Plot for {}".format(target))
plt.show()
def residual_density_plot(self,model, X_test, y_test,target):
sns.set_style('darkgrid')
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
sns.distplot(residuals)
plt.title("Residual Density Plot for {}".format(target))
plt.show()
def residual_variance_plot(self, model, X_test, y_test,target):
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby(pd.qcut(y_temp[target], 10))['residuals'].std()
res_var.index = [1,2,3,4,5,6,7,8,9,10]
res_var = res_var.reset_index()
ax = sns.lineplot(x="index", y="residuals", data=res_var)
plt.title("Residual Variance plot for {}".format(target))
plt.xlabel("Prediction Decile")
plt.ylabel("Residual Variance")
plt.show()
except:
pass
def residual_mean_plot(self, model, X_test, y_test,target):
sns.set_style('darkgrid')
try:
pred = model.predict(X_test)
residuals = pd.Series(pred,index=X_test.index) - pd.Series(y_test[target])
y_temp = y_test.copy()
y_temp['pred'] = pred
y_temp['residuals'] = residuals
res_var = y_temp.groupby( | pd.qcut(y_temp['pred'], 10) | pandas.qcut |
import pandas as pd
import numpy as np
import scipy
import os, sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab
import matplotlib as mpl
import seaborn as sns
import analysis_utils
from multiprocessing import Pool
sys.path.append('../utils/')
from game_utils import *
in_dir = '../../'
out_dir = '../../plots/'
#out_dir = '../../figures/'
# data_names = ['Behavioral Data', 'Goal Inference Model']
# data_dirs = ['new-processed-processed-1en01','goal-inference-simulations-processed-1en01']
# nominal_dirs = ['new-synthetic-processed-1en01','synthetic-goal-inference-simulations-processed-1en01']
# matched_dirs = ['new-synthetic-score-matched-processed-1en01','synthetic-score-matched-goal-inference-simulations-processed-1en01']
# subset = '1en01'
# start = 1440
# groups = ['High Scoring','Low Scoring','']
# behaviors = ['Skilled','']
# score_cutoff = 0.75
# matched = [True, False]
data_names = ['Goal Inference Noise Model']
data_dirs = ['parset-simulations-processed-1en01']
nominal_dirs = ['synthetic-parset-simulations-processed-1en01']
matched_dirs = ['synthetic-score-matched-parset-simulations-processed-1en01']
subset = '1en01'
# data_names = ['Goal Inference Attention Model']
# data_dirs = ['goal-inference-attention-simulations-processed-1en01']
# nominal_dirs = ['synthetic-goal-inference-attention-simulations-processed-1en01']
# matched_dirs = ['synthetic-score-matched-goal-inference-attention-simulations-processed-1en01']
# subset = '1en01'
# data_names = ['Social Heuristic']
# data_dirs = ['social-heuristic-simulations-processed-1en01']
# nominal_dirs = ['synthetic-social-heuristic-simulations-processed-1en01']
# matched_dirs = ['synthetic-score-matched-social-heuristic-simulations-processed-1en01']
# subset = '1en01'
# data_names = ['Unconditional Social Heuristic']
# data_dirs = ['unconditional-social-heuristic-simulations-simulations-processed-1en01']
# nominal_dirs = ['synthetic-unconditional-social-heuristic-simulations-simulations-processed-1en01']
# matched_dirs = ['synthetic-score-matched-unconditional-social-heuristic-simulations-simulations-processed-1en01']
# subset = '1en01'
start = 1440
groups = ['']
behaviors = ['']
score_cutoff = 0.75
matched = [True, False]
def score(sub):
return np.mean(sub['bg_val'])
def speed(sub):
return sum(sub['velocity'] > 3) > 0
def spinning(sub):
return sum(sub['spinning']) > 0
def dist_to_mean_others(sub):
return np.mean(sub['dist_to_mean_others'])
def face_towards_after_away(sub):
ignore_state = lambda sub, i: sub.iloc[i]['spinning']
this_state = lambda sub, i: sub.iloc[i]['ave_dist_others'] < sub.iloc[i]['dist_to_mean_others']
next_state = lambda sub, i: sub.iloc[i]['facing']
return analysis_utils.get_value(sub, ignore_state, this_state, next_state)
def face_away_when_low(sub):
start_index = 1
initial_condition = lambda sub, i: (sub.iloc[i]['ave_dist_others'] > sub.iloc[i]['dist_to_mean_others']) and sub.iloc[i]['bg_val'] < 1.0
while_condition = lambda sub, i: sub.iloc[i]['bg_val'] < 1.0
final_condition = lambda sub, i: (sub.iloc[i]['ave_dist_others'] < sub.iloc[i]['dist_to_mean_others'])
return analysis_utils.get_while_value(sub, initial_condition, while_condition, final_condition, start_index)
def facing_spinning(sub):
start_index = 1
initial_condition = lambda sub, i: ~sub.iloc[i-1]['spinning'] and ~sub.iloc[i-1]['other_spinning'] and ~sub.iloc[i]['spinning'] and sub.iloc[i]['other_spinning']
while_condition = lambda sub, i: ~sub.iloc[i-1]['facing_spinning']
final_condition = lambda sub, i: sub.iloc[i]['facing_spinning']
return analysis_utils.get_while_value(sub, initial_condition, while_condition, final_condition, start_index)
function_names = ['Score','Speed','Spinning','Distance to Mean of Other Positions','Average Time Before Facing Distant Group', 'Average Time Before Facing Away From Group After Low Score', 'Average Time Before Facing Spinning Players']
functions = [score, speed, spinning, dist_to_mean_others, face_towards_after_away, face_away_when_low, facing_spinning]
compares = [False, False, False, True, True, True, True]
# function_names = ['Distance to Mean of Other Positions', 'Average Time Before Facing Away From Group After Low Score', 'Average Time Before Facing Spinning Players']
# functions = [dist_to_mean_others, face_away_when_low, facing_spinning]
# compares = [True, True, True]
def plot_synthetic(args):
data_ind, func_ind, group, behavior, match = args
data_dir = in_dir + data_dirs[data_ind]
function = functions[func_ind]
if match:
synthetic_dir = in_dir + matched_dirs[data_ind]
else:
synthetic_dir = in_dir + nominal_dirs[data_ind]
games = []
ns = []
values = []
scores = []
sources = []
lengths = []
for t,game in enumerate(os.listdir(data_dir)):
if game[-4:] != '.csv':
continue
if game.split('_')[-2].split('-')[1] != subset:
continue
data = pd.io.parsers.read_csv(data_dir + '/' + game)
syn_data = pd.io.parsers.read_csv(synthetic_dir + '/' + game)
if compares[func_ind]:
if match:
dfs = ['Interacting Groups','Matched Nominal Groups']
else:
dfs = ['Interacting Groups','Random Nominal Groups']
else:
dfs = ['Interacting Groups']
for df in dfs:
if df == 'Interacting Groups':
players = list(set(data[data['tick'] == start]['pid'].dropna()))
else:
players = list(set(syn_data[syn_data['tick'] == start]['pid'].dropna()))
n = len(players)
# if n == 6:
# n = 5
for i,p in enumerate(players):
if df == 'Interacting Groups':
sub = data[data['pid'] == p]
else:
sub = syn_data[syn_data['pid'] == p]
ignore = False
if len(sub) < start:
ignore = True
sub = sub.iloc[start:].copy()
if behavior == 'Skilled':
if np.mean(sub['spinning']) == 0 or np.mean(sub['velocity']>3) == 0:
ignore = True
if group == 'High Scoring':
if np.mean(sub['bg_val']) < score_cutoff:
ignore = True
if group == 'Low Scoring':
if np.mean(sub['bg_val']) >= score_cutoff:
ignore = True
if ignore:
values += [np.nan]
else:
val = function(sub)
values += [val]
games += [game]
ns += [n]
scores += [np.mean(sub['bg_val'])]
sources += [df]
lengths += [len(sub)]
data = | pd.DataFrame({'Game':games,'Score':scores,'Number of Players':ns,function_names[func_ind]:values,'Source':sources,'Lengths':lengths}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:core_acc] *
# language: python
# name: conda-env-core_acc-py
# ---
# # Add annotations
#
# This notebook takes the dataframe with information about module composition and their labels and adds additional annotations including:
#
# 1. Which gene is contained within the modules (both gene id and gene name)
# 2. Baseline expression and expression in some context of interest
# 3. How clustered the module is on the genome
# 4. KEGG pathways that genes are found in
# 5. GO pathways genes are found in
# 6. Regulon/operon genes are found in
#
# All this information will help filter and determine which modules might be interesting to explore in _P. aeruginosa_ experiments.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
import random
import scipy.stats
import statsmodels.stats.multitest
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scripts import paths, utils, modules, annotations
random.seed(1)
# -
# Clustering method used to obtain gene-module assignments
method = "affinity"
processed = "spell"
# +
# Import gene memberships
pao1_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR, f"pao1_modules_{method}_acc_{processed}.tsv"
)
pa14_membership_filename = os.path.join(
paths.LOCAL_DATA_DIR, f"pa14_modules_{method}_acc_{processed}.tsv"
)
pao1_membership = pd.read_csv(pao1_membership_filename, sep="\t", index_col=0, header=0)
pa14_membership = pd.read_csv(pa14_membership_filename, sep="\t", index_col=0, header=0)
# -
print(pao1_membership.shape)
print(pa14_membership.shape)
pao1_membership.value_counts().median()
pa14_membership.value_counts().median()
# +
# Import gene metadata
pao1_gene_annot_filename = paths.GENE_PAO1_ANNOT
pa14_gene_annot_filename = paths.GENE_PA14_ANNOT
pao1_gene_annot = pd.read_csv(pao1_gene_annot_filename, index_col=0, header=0)
pa14_gene_annot = pd.read_csv(pa14_gene_annot_filename, index_col=0, header=0)
# -
# Import metadata of samples
metadata_filename = paths.SAMPLE_METADATA
# Get df with gene ids as indices and gene names as a column
# Having the data in a df instead of a series will just allow me to do my merges that are in the notebook
pao1_gene_annot = pao1_gene_annot["Name"].to_frame("gene name")
pa14_gene_annot = pa14_gene_annot["Name"].to_frame("gene name")
print(pao1_gene_annot.shape)
pao1_gene_annot.tail()
# +
# Use correlation matrix to get length of the genome
# -
# ## Add gene names
# Add gene names
pao1_gene_module_labels = pao1_membership.merge(
pao1_gene_annot, left_index=True, right_index=True
)
pa14_gene_module_labels = pa14_membership.merge(
pa14_gene_annot, left_index=True, right_index=True
)
# Note: Many gene ids don't have an associated gene name and so are NaNs
print(pao1_gene_module_labels.shape)
pao1_gene_module_labels.head()
# Note: Many gene ids don't have an associated gene name and so are NaNs
print(pa14_gene_module_labels.shape)
pa14_gene_module_labels.head()
# ## Add expression information
#
# 1. What is the baseline level of expression for each gene in the module?
# 2. What is the expression level of genes in a clinical context (i.e. clinical samples)?
# Read in expression data
# Data is of the form SRA sample id x gene id
pao1_compendium = pd.read_csv(paths.PAO1_COMPENDIUM, sep="\t", index_col=0)
pa14_compendium = pd.read_csv(paths.PA14_COMPENDIUM, sep="\t", index_col=0)
print(pao1_compendium.shape)
pao1_compendium.head()
print(pa14_compendium.shape)
pa14_compendium.head()
# Calculate median expression across all samples
pao1_median_all = pao1_compendium.median().to_frame("median expression")
pa14_median_all = pa14_compendium.median().to_frame("median expression")
pao1_median_all.head()
# +
# TO DO: Have Deb or Georgia select a study
# The following code blocks allow me to Select subset of samples and calculate the median
# expression across that subset of samples.
# An interesting selection would be what the clinical expression is, however
# it looks like we removed many of the clinical isolates from this compendium with our strain binning
# For now I will leave these blocks commented out
# selected_sample_ids = utils.get_sample_ids(
# metadata_filename, experiment_colname="SRA_study", sample_colname="Experiment", experiment_id="SRP063289")
# +
# Subset compendium
# subset_pao1_compendium = pao1_compendium.loc[selected_sample_ids]
# subset_pa14_compendium = pa14_compendium.loc[selected_sample_ids]
# +
# print(subset_pao1_compendium.shape)
# print(subset_pa14_compendium.shape)
# +
# pao1_median_subset = subset_pao1_compendium.median().to_frame("median subset expression")
# pa14_median_subset = subset_pa14_compendium.median().to_frame("median subset expression")
# -
# Add median expression to gene ids
pao1_gene_annot = pao1_gene_module_labels.merge(
pao1_median_all, left_index=True, right_index=True, how="left"
)
pa14_gene_annot = pa14_gene_module_labels.merge(
pa14_median_all, left_index=True, right_index=True, how="left"
)
# +
# Add median subset expression to gene ids
# pao1_gene_annot = pao1_gene_annot.merge(
# pao1_median_subset, left_index=True, right_index=True, how="left"
# )
# pa14_gene_annot = pa14_gene_annot.merge(
# pa14_median_subset, left_index=True, right_index=True, how="left"
# )
# -
print(pao1_gene_annot.shape)
pao1_gene_annot.head()
print(pa14_gene_annot.shape)
pa14_gene_annot.head()
# ## Genome location information
#
# How far are genes from other genes in the same module?
# +
# Sort gene ids and get last gene id to use as length of the genome
# This gene id should match the number of gene ids
sorted_pao1_compendium = pao1_compendium.T.sort_index()
pao1_last_gene_id = sorted_pao1_compendium.index[-1]
sorted_pa14_compendium = pa14_compendium.T.sort_index()
pa14_last_gene_id = sorted_pa14_compendium.index[-1]
# -
# Remove "PA" at the beginning of the identifier and convert into a float
pao1_genome_len = float(pao1_last_gene_id.split("PA")[-1])
pa14_genome_len = float(pa14_last_gene_id.split("PA14_")[-1])
print(pao1_genome_len, pa14_genome_len)
pao1_module_dist = modules.get_intra_module_dist(pao1_gene_annot, "PA", pao1_genome_len)
pa14_module_dist = modules.get_intra_module_dist(
pa14_gene_annot, "PA14_", pa14_genome_len
)
pao1_module_dist.head(10)
pa14_module_dist.head(10)
# Add module distance to gene names
pao1_gene_annot = pao1_gene_annot.merge(
pao1_module_dist, left_index=True, right_index=True, how="left"
)
pa14_gene_annot = pa14_gene_annot.merge(
pa14_module_dist, left_index=True, right_index=True, how="left"
)
pao1_gene_annot.head()
# ## Add KEGG pathway enrichment analysis
#
# For each pathway, find significant associations between KEGG pathways and accessory-accessory modules. This information is only available for PAO1.
#
# The [Fisher's exact test](https://en.wikipedia.org/wiki/Fisher%27s_exact_test) determines whether there is a significant association between two categorical variables in a contingency table (i.e. two classifications of the data). Here we use the Fisher's exact test to determine if there is an association between the two classifications: in kegg pathway or not and in accessory-accessory module or not. In other words, we want to determine if there is a statistically significant association between genes found in a given accessory-accessory module and the genes involved in a given KEGG pathway. To do this we compare the ratio of genes found in the kegg pathway that are in the accessory-accessory module to the ratio of kegg pathway genes that are not found in the accessory-accessory module.
#
# Since the numbers are large, we also applied the $\chi^2$ test as an alternative to the Fisher's exact test.
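# As a quick illustration of the contingency-table setup below, here is a toy example
# with hypothetical counts (not taken from this dataset): a 30-gene module sharing 12
# genes with a 40-gene KEGG pathway, out of 5000 genes total.
# +
toy_table = np.array([[12, 28], [18, 4942]])
toy_oddsratio, toy_pval = scipy.stats.fisher_exact(toy_table, alternative="greater")
print(toy_oddsratio, toy_pval)
# -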
pao1_pathway_filename = "https://raw.githubusercontent.com/greenelab/adage/7a4eda39d360b224268921dc1f2c14b32788ab16/Node_interpretation/pseudomonas_KEGG_terms.txt"
pao1_pathways = annotations.load_format_KEGG(pao1_pathway_filename)
pao1_pathways.head()
pao1_gene_annot.head()
# Given an accessory-accessory module, look for the array module with the most overlap/significant p-value
def KEGG_enrichment(acc_membership_df, kegg_df):
all_genes = set(acc_membership_df.index)
rows = []
best_rows = []
# For each accessory-accessory module
for module_name, module_df_group in acc_membership_df.groupby("module id"):
num_module_genes = module_df_group.shape[0]
module_genes = set(module_df_group.index)
not_module_genes = all_genes.difference(module_genes)
# Find the KEGG pathway with the best overlap
for kegg_name in kegg_df.index:
num_kegg_genes = kegg_df.loc[kegg_name, 1]
kegg_genes = set(kegg_df.loc[kegg_name, 2])
not_kegg_genes = all_genes.difference(kegg_genes)
# Make contingency table
# -----------------|accessory module |not accessory module
# kegg pathway | # genes | # genes
# not kegg pathway | # genes | # genes
module_kegg_genes = module_genes.intersection(kegg_genes)
not_module_kegg_genes = not_module_genes.intersection(kegg_genes)
module_not_kegg_genes = module_genes.intersection(not_kegg_genes)
not_module_not_kegg_genes = not_module_genes.intersection(not_kegg_genes)
observed_contingency_table = np.array(
[
[len(module_kegg_genes), len(not_module_kegg_genes)],
[len(module_not_kegg_genes), len(not_module_not_kegg_genes)],
]
)
# Fisher's exact test
oddsr, pval = scipy.stats.fisher_exact(
observed_contingency_table, alternative="greater"
)
# chi2 test will not accept 0 counts for the contingency table
# chi2, pval, dof, expected_counts = scipy.stats.chi2_contingency(
# observed_contingency_table
# )
# print(oddsr, pval)
rows.append(
{
"module id": module_name,
"enriched KEGG pathway": kegg_name,
"p-value": pval,
"num shared genes": len(module_kegg_genes),
"size module": num_module_genes,
"size KEGG pathway": num_kegg_genes,
}
)
enrichment_df = pd.DataFrame(rows)
# Get corrected pvalues
(
reject_,
pvals_corrected_,
alphacSidak,
alphacBonf,
) = statsmodels.stats.multitest.multipletests(
enrichment_df["p-value"].values,
alpha=0.05,
method="fdr_bh",
is_sorted=False,
)
enrichment_df["corrected p-value"] = pvals_corrected_
# Select best module mapping
for grp, grp_df in enrichment_df.groupby("module id"):
# Find if any pathways is significant
any_significant = (grp_df["corrected p-value"] < 0.05).any()
if any_significant:
best_kegg = grp_df[grp_df["corrected p-value"] < 0.05][
"enriched KEGG pathway"
]
best_pval = grp_df[grp_df["corrected p-value"] < 0.05]["p-value"].values[0]
best_shared = grp_df[grp_df["corrected p-value"] < 0.05][
"num shared genes"
].values[0]
best_module_size = grp_df[grp_df["corrected p-value"] < 0.05][
"size module"
].values[0]
best_kegg_size = grp_df[grp_df["corrected p-value"] < 0.05][
"size KEGG pathway"
].values[0]
best_corrected_pval = grp_df[grp_df["corrected p-value"] < 0.05][
"corrected p-value"
].values[0]
best_rows.append(
{
"module id": grp,
"enriched KEGG pathway": best_kegg,
"p-value": best_pval,
"num shared genes": best_shared,
"size module": best_module_size,
"size KEGG pathway": best_kegg_size,
"corrected p-value": best_corrected_pval,
}
)
else:
best_rows.append(
{
"module id": grp,
"enriched KEGG pathway": "NA",
"p-value": "NA",
"num shared genes": "NA",
"size module": "NA",
"size KEGG pathway": "NA",
"corrected p-value": "NA",
}
)
best_enrichment_df = pd.DataFrame(best_rows).set_index("module id")
return best_enrichment_df
pao1_enrichment_df = KEGG_enrichment(pao1_membership, pao1_pathways)
pao1_enrichment_df.head(20)
# Add pathway enrichment information
pao1_gene_annot = pao1_gene_annot.merge(
pao1_enrichment_df, left_on="module id", right_index=True, how="left"
)
pao1_gene_annot.head()
# ## Import and format operon
pao1_operon_filename = paths.PAO1_OPERON
pa14_operon_filename = paths.PA14_OPERON
pao1_operon = annotations.load_format_operons(pao1_operon_filename)
pa14_operon = annotations.load_format_operons(pa14_operon_filename)
pao1_operon.head()
# Add operons to pathway annotations for PAO1
pao1_gene_annot = pao1_gene_annot.merge(
pao1_operon, left_index=True, right_index=True, how="left"
)
print(pao1_gene_annot.shape)
pao1_gene_annot.head()
# For PA14 we only have operon annotations
pa14_gene_annot = pa14_gene_annot.merge(
pa14_operon, left_index=True, right_index=True, how="left"
)
# ## Add regulon
#
# For each regulon, what genes are contained in it. This information is only available for PAO1
# +
pao1_regulon_filename = "https://raw.githubusercontent.com/greenelab/core-accessory-interactome/6635c0e357c0172c2cebd0368648030e0ee4beaf/data/metadata/regulons_format.csv"
pao1_regulons = | pd.read_csv(pao1_regulon_filename, index_col=0, header=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Import Dependencies
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
# In[2]:
import numpy as np
import pandas as pd
# In[3]:
import datetime as dt
# In[173]:
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from sqlalchemy import extract
from sqlalchemy import and_
from sqlalchemy import or_
from mpl_toolkits.basemap import Basemap
# In[174]:
from flask import jsonify
# # Reflect Tables into SQLAlchemy ORM
# In[5]:
engine = create_engine("sqlite:////Users/cla/Desktop/UM Data Science/Homework/10 -sqlalchemy-challenge/Resources/hawaii.sqlite")
# In[6]:
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# In[7]:
# We can view all of the classes that automap found
Base.classes.keys()
# In[8]:
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# In[9]:
# Create our session (link) from Python to the DB
session = Session(engine)
# # Exploratory Climate Analysis
# In[10]:
last_date=session.query(Measurement.date).order_by(Measurement.date.desc()).first()
for date in last_date:
split_last_date=date.split('-')
last_year=int(split_last_date[0]); last_month=int(split_last_date[1]); last_day=int(split_last_date[2])
query_date = dt.date(last_year, last_month, last_day) - dt.timedelta(days=365)
print(query_date)
# # Precipitation Analysis
# In[11]:
last_year = session.query(Measurement.date,Measurement.station,Measurement.prcp).filter(Measurement.date>=query_date).order_by(Measurement.date).all()
last_year
last_year_df = pd.DataFrame(last_year).set_index('date').dropna()
# In[12]:
last_year_df.head()
# In[13]:
last_year_df.plot(figsize=(15,15))
plt.show()
# In[14]:
# Use Pandas to calcualte the summary statistics for the precipitation data
prcp_stats = last_year_df.describe()
prcp_stats
# # Station Analysis
# In[15]:
#Stations available in dataset
station_count = session.query(Measurement.station).group_by(Measurement.station).count()
station_count
# In[16]:
# The most active stations
station_activity = session.query(func.count(Measurement.station).label('count'), Measurement.station).group_by(Measurement.station).order_by('count').all()
station_activity_df = pd.DataFrame(station_activity).dropna()
station_activity_df = station_activity_df.sort_values(by='count', ascending=False)
station_activity_df
# In[141]:
station_names = session.query(Station.station, Station.name, Station.latitude, Station.longitude, Station.elevation).all()
station_names = pd.DataFrame(station_names)
station_names
# In[18]:
most_active = station_activity_df.head(1)
most_active_station = most_active['station'].values[0]
print('The most active station is: ' + str(most_active_station))
# In[19]:
least_active = station_activity_df.sort_values(by='count').head(1)
least_active_station = least_active['station'].values[0]
print('The least active station is: ' + str(least_active_station))
# In[20]:
# The lowest temperature recorded
station_temperature = session.query(func.min(Measurement.tobs).label('min_temp'), Measurement.station).group_by(Measurement.station).order_by('min_temp').all()
station_mintemp_df = pd.DataFrame(station_temperature).dropna()
station_mintemp_df = station_mintemp_df.sort_values(by='min_temp')
min_temp_recorded = station_mintemp_df['min_temp'].values[0]
min_temp_station = station_mintemp_df['station'].values[0]
print('The minimum temperature recorded is ' + str(min_temp_recorded) + ' in station ' + str(min_temp_station))
station_mintemp_df
# In[21]:
#Lowest and highest temperature recorded, and average temperature of the most active station
most_active_summary = session.query(func.count(Measurement.station).label('count'), Measurement.station,(func.max(Measurement.tobs)), (func.min(Measurement.tobs)), (func.avg(Measurement.tobs))).group_by(Measurement.station).order_by(func.count(Measurement.station).desc()).first()
most_active_summary
# In[22]:
#Temperatures recorded at the most active station in the last year
most_active_station
most_active_ly = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date>=query_date).filter(Measurement.station==most_active_station).order_by(Measurement.date).all()
most_active_ly_df = pd.DataFrame(most_active_ly)
most_active_ly_df.plot.hist(bins=12)
plt.show()
# In[148]:
#Date weather calculator (search for weather in historical data)
start_date = input(f'Enter a date to search the weather(yyyy-mm-dd)')
end_date = input(f'End date of your search(yyyy-mm-dd)')
def calc_temps(start_date, end_date):
trip_temps = session.query(Measurement.date,func.avg(Measurement.tobs),func.max(Measurement.tobs),func.min(Measurement.tobs)). group_by(Measurement.date). filter(Measurement.date>=start_date, Measurement.date <= end_date).all()
return(trip_temps)
calc_temps(start_date, end_date)
# In[149]:
x = calc_temps(start_date, end_date)
trip_temp_df = pd.DataFrame(x, columns=['Date','Avg_Temp','Max_Temp','Min_Temp'])
trip_temp_df.plot.bar()
plt.show()
# # Weather forecast calculator for your future trip
# In[160]:
#Enter your planned trip dates
dates = []
start_date = input(f'Start date of your trip(yyyy-mm-dd)')
end_date = input(f'End date of your trip(yyyy-mm-dd)')
for date in start_date, end_date:
split_date=date.split('-')
dates.append(split_date)
start,end = dates
start_year=(start[0]); start_month=(start[1]); start_day=(start[2])
end_year=(end[0]); end_month=(end[1]); end_day=(end[2])
# In[161]:
#Trip weather calculator
trip = session.query(Measurement.date,func.avg(Measurement.tobs),func.max(Measurement.tobs),func.min(Measurement.tobs),func.avg(Measurement.prcp)).filter(or_(and_(extract('day', Measurement.date)>=start_day,extract('month', Measurement.date)==start_month),(and_(extract('day', Measurement.date)<=end_day,extract('month', Measurement.date)==end_month)))).group_by(Measurement.date).all()
trip_forecast_df = pd.DataFrame(trip, columns=['Date','Avg_Temp','Avg_Max_Temp','Avg_Min_Temp','Avg_Precipitation'])
trip_forecast_df.head(5)
# In[162]:
#Forecast for planned trip
print(f'During your planned trip: From ' + (start_date) + ' to: ' + (end_date) + ' the weather forecast is the following: ')
print(trip_forecast_df.mean())
# In[166]:
trip_forecast_summary
# In[172]:
fig,ax=plt.subplots()
for i in range(3):
ax.bar(x=i,height=trip_forecast_summary[i])
ax2=ax.twinx()
ax2.bar(x=3,height=trip_forecast_summary[3])
# In[163]:
trip_forecast_summary = trip_forecast_df.mean()
trip_forecast_summary.plot.bar()
# In[147]:
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
trip_rain = session.query(Measurement.station,func.avg(Measurement.prcp)).filter(or_(and_(extract('day', Measurement.date)>=start_day,extract('month', Measurement.date)==start_month),(and_(extract('day', Measurement.date)<=end_day,extract('month', Measurement.date)==end_month)))).group_by(Measurement.station).all()
trip_rain = | pd.DataFrame(trip_rain, columns=['Station','Avg_Precipitation']) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from pandas.testing import assert_series_equal
from sid.config import INDEX_NAMES
from sid.update_states import _kill_people_over_icu_limit
from sid.update_states import _update_immunity_level
from sid.update_states import _update_info_on_new_tests
from sid.update_states import _update_info_on_new_vaccinations
from sid.update_states import compute_waning_immunity
from sid.update_states import update_derived_state_variables
@pytest.mark.unit
def test_kill_people_over_icu_limit_not_binding():
states = | pd.DataFrame({"needs_icu": [False] * 5 + [True] * 5, "cd_dead_true": -1}) | pandas.DataFrame |
"""Tests models
"""
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
from dsutils.models import InterpolatingPredictor
def test_InterpolatingPredictor():
"""Tests ensembling.EnsembleRegressor"""
# Make dummy data
N = 100
D = 3
X = pd.DataFrame(data=np.random.randn(N,D))
y = | pd.Series(index=X.index) | pandas.Series |
import pandas as pd
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
concat_df = pd.concat([df1, df2, df3], axis=1)
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
merge_df = pd.merge(left, right, how='inner', on='key')
left_comp = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right_comp = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
merge_df_comp = | pd.merge(left_comp, right_comp, on=['key1', 'key2']) | pandas.merge |
import pandas as pd
#############
###Helpers###
#############
def format_mat(flavor_mat):
flavor_frame = | pd.DataFrame(flavor_mat) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Zerodha Kite Connect - candlestick pattern scanner
@author: <NAME> (http://rasuquant.com/wp/)
"""
from kiteconnect import KiteConnect
import pandas as pd
import datetime as dt
import os
import time
import numpy as np
from technicalta import *
#cwd = os.chdir("D:\\Udemy\\Zerodha KiteConnect API\\1_account_authorization")
apikey = '<KEY>'
#generate trading session
'''access_token = open("access_token.txt",'r').read()
key_secret = open("api_key.txt",'r').read().split()
kite = KiteConnect(api_key=key_secret[0])
kite.set_access_token(access_token)
#get dump of all NSE instruments
instrument_dump = kite.instruments("NSE")
instrument_df = pd.DataFrame(instrument_dump)
'''
def instrumentLookup(instrument_df,symbol):
"""Looks up instrument token for a given script from instrument dump"""
try:
return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]
except:
return -1
def fetchOHLC(ticker,interval,duration):
"""extracts historical data and outputs in the form of dataframe"""
instrument = instrumentLookup(instrument_df,ticker)
data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))
data.set_index("date",inplace=True)
return data
def doji(ohlc_df):
"""returns dataframe with doji candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["doji"] = abs(df["close"] - df["open"]) <= (0.05 * avg_candle_size)
return df
def maru_bozu(ohlc_df):
"""returns dataframe with maru bozu candle column"""
df = ohlc_df.copy()
avg_candle_size = abs(df["close"] - df["open"]).median()
df["h-c"] = df["high"]-df["close"]
df["l-o"] = df["low"]-df["open"]
df["h-o"] = df["high"]-df["open"]
df["l-c"] = df["low"]-df["close"]
df["maru_bozu"] = np.where((df["close"] - df["open"] > 2*avg_candle_size) & \
(df[["h-c","l-o"]].max(axis=1) < 0.005*avg_candle_size),"maru_bozu_green",
np.where((df["open"] - df["close"] > 2*avg_candle_size) & \
(abs(df[["h-o","l-c"]]).max(axis=1) < 0.005*avg_candle_size),"maru_bozu_red",False))
df.drop(["h-c","l-o","h-o","l-c"],axis=1,inplace=True)
return df
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def shooting_star(ohlc_df):
"""returns dataframe with shooting star candle column"""
df = ohlc_df.copy()
df["sstar"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["high"] - df["close"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["high"] - df["open"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df
def levels(ohlc_day):
"""returns pivot point and support/resistance levels"""
high = round(ohlc_day["high"][-1],2)
low = round(ohlc_day["low"][-1],2)
close = round(ohlc_day["close"][-1],2)
pivot = round((high + low + close)/3,2)
r1 = round((2*pivot - low),2)
r2 = round((pivot + (high - low)),2)
r3 = round((high + 2*(pivot - low)),2)
s1 = round((2*pivot - high),2)
s2 = round((pivot - (high - low)),2)
s3 = round((low - 2*(high - pivot)),2)
return (pivot,r1,r2,r3,s1,s2,s3)
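# Toy check of these formulas on made-up numbers (not market data): with
# high=110, low=100, close=106 the pivot is round((110+100+106)/3, 2) = 105.33,
# so r1 = 2*105.33 - 100 = 110.66 and s1 = 2*105.33 - 110 = 100.66.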
def trend(ohlc_df,n):
"function to assess the trend by analyzing each candle"
df = ohlc_df.copy()
df["up"] = np.where(df["low"]>=df["low"].shift(1),1,0)
df["dn"] = np.where(df["high"]<=df["high"].shift(1),1,0)
if df["close"][-1] > df["open"][-1]:
if df["up"][-1*n:].sum() >= 0.7*n:
return "uptrend"
elif df["open"][-1] > df["close"][-1]:
if df["dn"][-1*n:].sum() >= 0.7*n:
return "downtrend"
else:
return None
def res_sup(ohlc_df,ohlc_day):
"""calculates closest resistance and support levels for a given candle"""
level = ((ohlc_df["close"][-1] + ohlc_df["open"][-1])/2 + (ohlc_df["high"][-1] + ohlc_df["low"][-1])/2)/2
p,r1,r2,r3,s1,s2,s3 = levels(ohlc_day)
l_r1=level-r1
l_r2=level-r2
l_r3=level-r3
l_p=level-p
l_s1=level-s1
l_s2=level-s2
l_s3=level-s3
lev_ser = pd.Series([l_p,l_r1,l_r2,l_r3,l_s1,l_s2,l_s3],index=["p","r1","r2","r3","s1","s2","s3"])
sup = lev_ser[lev_ser>0].idxmin()
res = lev_ser[lev_ser>0].idxmax()
return (eval('{}'.format(res)), eval('{}'.format(sup)))
def candle_type(ohlc_df):
"""returns the candle type of the last candle of an OHLC DF"""
'''ohlc_df['open']=int(ohlc_df['open'])
ohlc_df['close']=int(ohlc_df['close'])
ohlc_df['high']=int(ohlc_df['high'])
ohlc_df['low']=int(ohlc_df['low'])'''
candle = None
if doji(ohlc_df)["doji"][-1] == True:
candle = "doji"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_green":
candle = "maru_bozu_green"
if maru_bozu(ohlc_df)["maru_bozu"][-1] == "maru_bozu_red":
candle = "maru_bozu_red"
if shooting_star(ohlc_df)["sstar"][-1] == True:
candle = "shooting_star"
if hammer(ohlc_df)["hammer"][-1] == True:
candle = "hammer"
return candle
def candle_pattern(ohlc_df,ohlc_day):
"""returns the candle pattern identified"""
pattern = None
signi = "low"
avg_candle_size = abs(ohlc_df["close"] - ohlc_df["open"]).median()
sup, res = res_sup(ohlc_df,ohlc_day)
if (sup - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (sup + 1.5*avg_candle_size):
signi = "HIGH"
if (res - 1.5*avg_candle_size) < ohlc_df["close"][-1] < (res + 1.5*avg_candle_size):
signi = "HIGH"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] > ohlc_df["close"][-2] \
and ohlc_df["close"][-1] > ohlc_df["open"][-1]:
pattern = "doji_bullish"
if candle_type(ohlc_df) == 'doji' \
and ohlc_df["close"][-1] < ohlc_df["close"][-2] \
and ohlc_df["close"][-1] < ohlc_df["open"][-1]:
pattern = "doji_bearish"
if candle_type(ohlc_df) == "maru_bozu_green":
pattern = "maru_bozu_bullish"
if candle_type(ohlc_df) == "maru_bozu_red":
pattern = "maru_bozu_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "hammer":
pattern = "hanging_man_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" and candle_type(ohlc_df) == "hammer":
pattern = "hammer_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" and candle_type(ohlc_df) == "shooting_star":
pattern = "shooting_star_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["close"][-2] \
and ohlc_df["low"][-1] > ohlc_df["open"][-2]:
pattern = "harami_cross_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) == "doji" \
and ohlc_df["high"][-1] < ohlc_df["open"][-2] \
and ohlc_df["low"][-1] > ohlc_df["close"][-2]:
pattern = "harami_cross_bullish"
if trend(ohlc_df.iloc[:-1,:],7) == "uptrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["open"][-1] > ohlc_df["high"][-2] \
and ohlc_df["close"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bearish"
if trend(ohlc_df.iloc[:-1,:],7) == "downtrend" \
and candle_type(ohlc_df) != "doji" \
and ohlc_df["close"][-1] > ohlc_df["high"][-2] \
and ohlc_df["open"][-1] < ohlc_df["low"][-2]:
pattern = "engulfing_bullish"
return "Significance - {}, Pattern - {}".format(signi,pattern)
##############################################################################################
tickers = ["ZEEL","WIPRO","VEDL","ULTRACEMCO","UPL","TITAN","TECHM","TATASTEEL",
"TATAMOTORS","TCS","SUNPHARMA","SBIN","SHREECEM","RELIANCE","POWERGRID",
"ONGC","NESTLEIND","NTPC","MARUTI","M&M","LT","KOTAKBANK","JSWSTEEL","INFY",
"INDUSINDBK","IOC","ITC","ICICIBANK","HDFC","HINDUNILVR","HINDALCO",
"HEROMOTOCO","HDFCBANK","HCLTECH","GRASIM","GAIL","EICHERMOT","DRREDDY",
"COALINDIA","CIPLA","BRITANNIA","INFRATEL","BHARTIARTL","BPCL","BAJAJFINSV",
"BAJFINANCE","BAJAJ-AUTO","AXISBANK","ASIANPAINT","ADANIPORTS","IDEA",
"MCDOWELL-N","UBL","NIACL","SIEMENS","SRTRANSFIN","SBILIFE","PNB",
"PGHH","PFC","PEL","PIDILITIND","PETRONET","PAGEIND","OFSS","NMDC","NHPC",
"MOTHERSUMI","MARICO","LUPIN","L&TFH","INDIGO","IBULHSGFIN","ICICIPRULI",
"ICICIGI","HINDZINC","HINDPETRO","HAVELLS","HDFCLIFE","HDFCAMC","GODREJCP",
"GICRE","DIVISLAB","DABUR","DLF","CONCOR","COLPAL","CADILAHC","BOSCHLTD",
"BIOCON","BERGEPAINT","BANKBARODA","BANDHANBNK","BAJAJHLDNG","DMART",
"AUROPHARMA","ASHOKLEY","AMBUJACEM","ADANITRANS","ACC",
"WHIRLPOOL","WABCOINDIA","VOLTAS","VINATIORGA","VBL","VARROC","VGUARD",
"UNIONBANK","UCOBANK","TRENT","TORNTPOWER","TORNTPHARM","THERMAX","RAMCOCEM",
"TATAPOWER","TATACONSUM","TVSMOTOR","TTKPRESTIG","SYNGENE","SYMPHONY",
"SUPREMEIND","SUNDRMFAST","SUNDARMFIN","SUNTV","STRTECH","SAIL","SOLARINDS",
"SHRIRAMCIT","SCHAEFFLER","SANOFI","SRF","SKFINDIA","SJVN","RELAXO",
"RAJESHEXPO","RECLTD","RBLBANK","QUESS","PRESTIGE","POLYCAB","PHOENIXLTD",
"PFIZER","PNBHOUSING","PIIND","OIL","OBEROIRLTY","NAM-INDIA","NATIONALUM",
"NLCINDIA","NBCC","NATCOPHARM","MUTHOOTFIN","MPHASIS","MOTILALOFS","MINDTREE",
"MFSL","MRPL","MANAPPURAM","MAHINDCIE","M&MFIN","MGL","MRF","LTI","LICHSGFIN",
"LTTS","KANSAINER","KRBL","JUBILANT","JUBLFOOD","JINDALSTEL","JSWENERGY",
"IPCALAB","NAUKRI","IGL","IOB","INDHOTEL","INDIANB","IBVENTURES","IDFCFIRSTB",
"IDBI","ISEC","HUDCO","HONAUT","HAL","HEXAWARE","HATSUN","HEG","GSPL",
"GUJGASLTD","GRAPHITE","GODREJPROP","GODREJIND","GODREJAGRO","GLENMARK",
"GLAXO","GILLETTE","GMRINFRA","FRETAIL","FCONSUMER","FORTIS","FEDERALBNK",
"EXIDEIND","ESCORTS","ERIS","ENGINERSIN","ENDURANCE","EMAMILTD","EDELWEISS",
"EIHOTEL","LALPATHLAB","DALBHARAT","CUMMINSIND","CROMPTON","COROMANDEL","CUB",
"CHOLAFIN","CHOLAHLDNG","CENTRALBK","CASTROLIND","CANBK","CRISIL","CESC",
"BBTC","BLUEDART","BHEL","BHARATFORG","BEL","BAYERCROP","BATAINDIA",
"BANKINDIA","BALKRISIND","ATUL","ASTRAL","APOLLOTYRE","APOLLOHOSP",
"AMARAJABAT","ALKEM","APLLTD","AJANTPHARM","ABFRL","ABCAPITAL","ADANIPOWER",
"ADANIGREEN","ADANIGAS","ABBOTINDIA","AAVAS","AARTIIND","AUBANK","AIAENG","3MINDIA"]
def main():
for ticker in tickers:
try:
ohlc = fetchOHLC(ticker, '5minute',5)
ohlc_day = fetchOHLC(ticker, 'day',30)
ohlc_day = ohlc_day.iloc[:-1,:]
cp = candle_pattern(ohlc,ohlc_day)
print(ticker, ": ",cp)
except:
print("skipping for ",ticker)
'''
# Continuous execution
starttime=time.time()
timeout = time.time() + 60*60*1 # 60 seconds times 60 meaning the script will run for 1 hr
while time.time() <= timeout:
try:
print("passthrough at ",time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
main()
time.sleep(300 - ((time.time() - starttime) % 300.0)) # 300 second interval between each new execution
except KeyboardInterrupt:
print('\n\nKeyboard exception received. Exiting.')
exit()'''
from pprint import pprint
def AlphaData_fxintraday(frombase,to,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
frombase=['EUR','USD','GBP','AUD','EUR']
to=['USD','JPY','CAD','CNY','CHF','HKD','GBP','KRW']
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_intraday(i,j,60)
pprint('{}/{} Done'.format(i,j))
time.sleep(30)
'''
def AlphaData_fxdaily(frombase,to):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=FX_DAILY&from_symbol={}&to_symbol={}&apikey={}".format(frombase,to,apikey)
#url="https://www.alphavantage.co/query?function=FX_INTRADAY&from_symbol={}&to_symbol={}&interval={}min&apikey={}".format(frombase,to,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series FX (Daily)']
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df=df.drop(['1. open','2. high','3. low', '4. close'], axis=1)
return df#data['Time Series FX ({}min)'.format(interval)]
except:
print("An exception occurred")
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
dataintra=AlphaData_intraday(i,j,5)
datadaily=AlphaData_daily(i,j)
pprint(dataintra)
if len(dataintra) > 0:
if len(datadaily) > 0 :
pprint(candle_type(dataintra))
#cp = candle_pattern(dataintra,datadaily)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)'''
'''
for j in frombase:
for i in to:
pprint('{}/{} in process'.format(i,j))
data=AlphaData_daily(i,j)
pprint('{}/{} Done'.format(i,j))
time.sleep(5)
'''
def AlphaData_intraday(symbol,interval):
import requests
import json
from pprint import pprint
global apikey
url="https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY&symbol={}&interval={}min&apikey={}".format(symbol,interval,apikey)
data=requests.get(url).json()
#pprint(dict(data['Time Series FX ({}min)'.format(interval)]))#['2020-07-31 20:20:00'])#['4. close'])
import pandas as pd
try:
if data:
data=data['Time Series ({}min)'.format(interval)]
df=pd.DataFrame(data).T
df['open']=df['1. open']
df['high']=df['2. high']
df['low']=df['3. low']
df['close']=df['4. close']
df['volume']=df['5. volume']
df=df.drop(['1. open','2. high','3. low', '4. close','5. volume'], axis=1)
df['open']=pd.to_numeric(df['open'])
df['high']= | pd.to_numeric(df['high']) | pandas.to_numeric |
import streamlit as st
import numpy as np
import pandas as pd
from matplotlib.image import imread
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import seaborn as sns
import requests
import joblib
import shap
# import streamlit.components.v1 as components
shap.initjs()
st.set_option('deprecation.showPyplotGlobalUse', False)
########################################################
# Session for the API
########################################################
def fetch(session, url):
"""Create session for the API
Args:
session : session
url (link): complete url to connect to
Returns:
result (json): result of the request to the url
"""
try:
result = session.get(url)
return result.json()
except Exception:
return {}
session = requests.Session()
########################################################
# Functions to call the EndPoints
########################################################
def client():
#Getting Client details
response = fetch(session, f"http://projetoc-scoring.herokuapp.com/api/clients")
if response:
return response["clientsId"]
else:
return "Error"
def client_details(id):
#Getting Client details
response = fetch(session,f"http://projetoc-scoring.herokuapp.com/api/clients/{id}")
if response:
return response
else:
return "Error"
def client_prediction(id):
#Getting Client prediction
response = fetch(session, f"http://projetoc-scoring.herokuapp.com/api/clients/{id}/prediction")
if response:
return response
else:
return "Error"
########################################################
# Function to load data stored on github
########################################################
@st.experimental_memo(suppress_st_warning=True)
def load_data():
"""Load data necessary for the page 2 of the dashboard.
- df_train
- df_test
- df_test_cat_features
- df_test_cat_features
Returns:
df, df_test, df_test_cat_features, df_test_num_features : DataFrame loaded
"""
df = pd.read_csv("./dashboard_data/df_train.csv")
df_test = pd.read_csv("./dashboard_data/df_test.csv")
df_test_cat_features = pd.read_csv("./dashboard_data/df_test_cat_features.csv")
df_test_num_features = | pd.read_csv("./dashboard_data/df_test_num_features.csv") | pandas.read_csv |
#! /usr/bin/env python3
import argparse
import re,sys,os,math,gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import copy
import math
from math import pi
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy import sparse
import seaborn as sns
from scipy.interpolate import BSpline, make_interp_spline  # make_interp_spline is used by draw_epigenetic2 below
plt.rcParams.update({'figure.max_open_warning': 100000})
plt.style.use('seaborn-colorblind')
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['savefig.dpi'] = 300  # figure resolution (dpi)
mpl.rcParams['figure.dpi'] = 300
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
__author__ ='赵玥'
__mail__ ='<EMAIL>'
__date__ ='20191101'
def draw_boundaries(ax,Boundary_dict,start,end,samplelist,str_x,sam_x):
ax.tick_params(top='off',bottom='off',left='on',right='off')
for loc in ['top','left','right','bottom']:
ax.spines[loc].set_visible(False)
#ax.spines['left'].set_color('k')
#ax.spines['left'].set_linewidth(2)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['left'].set_linewidth(1)
#ax.spines['right'].set_visible(False)
#ax.spines['bottom'].set_visible(False)
    ax.set_facecolor('w')  # set_axis_bgcolor() was removed in newer Matplotlib versions
ax.set(xticks=[])
ax.set(yticks=[])
sample1 = samplelist[0]
sample2 = samplelist[1]
boundary_mid1 = Boundary_dict[sample1]['mid'].tolist()
boundary_mid2 = Boundary_dict[sample2]['mid'].tolist()
bound_y1min = [1.25 for i in boundary_mid1]
bound_y1max = [1.75 for i in boundary_mid1]
bound_y2min = [0.25 for i in boundary_mid2]
bound_y2max = [0.75 for i in boundary_mid2]
ax.set_ylim(0,2)
ax.vlines(boundary_mid1,bound_y1min,bound_y1max,lw=2,color='red')
ax.vlines(boundary_mid2,bound_y2min,bound_y2max,lw=2,color='green')
ax.set_xlim(start,end)
ax.text(str_x,0.5,'bound',horizontalalignment='right',verticalalignment='center',rotation='vertical',transform=ax.transAxes,fontsize=8)
ax.text(sam_x,0.75,sample1,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="red",fontsize=8)
ax.text(sam_x,0.25,sample2,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="green",fontsize=8)
def cut_boundaries(Boundary_dict,sample,boundaryPath,chrom,start,end):
Boundary_df = pd.read_table(boundaryPath,header=0,index_col=None,encoding='utf-8')
Boundary_df = Boundary_df.fillna(0)
Boundary_df = Boundary_df[['start','end']]
Boundary_df['mid'] = (Boundary_df['start'] + Boundary_df['end'])/2
Boundary_df = Boundary_df[Boundary_df['mid']>=start]
Boundary_df = Boundary_df[Boundary_df['mid']<=end]
Boundary_df.reset_index(drop=True)
Boundary_dict[sample] = Boundary_df
return Boundary_dict
def draw_insulation(ax,insu,chrs,start,end,color):
#df_insu=cut_insulation(insu,chrs,start,end)
df_insu=pd.read_table(insu,sep='\t',names=['chrs','start','end','insu'])
ax.tick_params(top='off',bottom='off',left='on',right='off')
line=ax.plot(df_insu['start'],df_insu['insu'], color=color, linewidth=0.8, label="insulation")
ax.set_xlim(start,end)
ax.set_xticks([])
ax.set_ylim(df_insu['insu'].min(),df_insu['insu'].max())
#ax.set_yticks([df_insu['insu'].min(),df_insu['insu'].max()])
for loc in ['left','top','bottom']:
ax.spines[loc].set_linewidth(0)
ax.spines[loc].set_color('black')
ax.spines['right'].set_linewidth(0)
ax.spines[loc].set_color('black')
def draw_SV(files,ax,chrom,start,end,sample,color,types):
markdf=pd.read_table(files,sep='\t')
markdf=markdf[markdf['types']==types]
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
markdf['sign']=[1]*len(markdf)
#vectorf = np.vectorize(np.float)
#vectori = np.vectorize(np.int)
#starts=list(markdf['start'])
#hight=list(markdf['sign'])
#width=(markdf['width'])
ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
ax.set_xlim([start,end])
ax.set_ylim([0,1])
xts = np.linspace(start,end,2)
yts = np.linspace(0,1,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks([])
#ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
ax.spines['bottom'].set_linewidth(0)
ax.spines['left'].set_linewidth(0)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
    if type =='bottom':  # NOTE: 'type' is not a parameter of draw_SV; unless a global 'type' exists elsewhere, execution falls through to the else branch
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def cut_insulation(insu,chrs,start,end):
file=open(insu)
file_list=[]
for i in file:
i=i.strip()
file_list.append(i)
insu_list=[]
for i in range(len(file_list)):
x=file_list[i].split('/')
insu_list.append([x[-2],file_list[i]])
list_df=pd.DataFrame(insu_list,columns=['chrs','insu'])
list_df=list_df[list_df['chrs']==chrs]
list_df=list_df.reset_index(drop=True)
df_insu=pd.read_table(list_df['insu'][0],sep='\t',names=['chrs','start','end','insu'],comment='t')
df_insu['mid']=(df_insu['start']+df_insu['end'])/2
df_insu=df_insu.fillna(0)
df_insu=df_insu[(df_insu['start']>start)&(df_insu['end']<end)]
return df_insu
def draw_AB(files,res,chrom,start,end,sample,ax):
compartdf = pd.read_table(files,sep='\t',names=['chrom','start','end','eigen1'])
compartdf = compartdf[compartdf['chrom']==chrom]
compartdf = compartdf.reset_index(drop=True)
df = compartdf
df=df[df['end']>=start]
df=df[df['start']<=end]
df=df.reset_index(drop=True)
ax.tick_params(top='off',bottom='on',left='off',right='off')
for loc in ['left','right','top','bottom']:
ax.spines[loc].set_visible(False)
df['width']=df['end']-df['start']
#ax.axis([start, end, min,max])
for i in range(len(df)):
if df['eigen1'][i]>0:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#E7605B',linewidth=0,align='edge')
else:
ax.bar(x=df['start'][i],height=df['eigen1'][i],bottom=0, width = df['width'][i],color='#3B679E',linewidth=0,align='edge')
ax.set_ylim(-0.1,0.1)
ax.set_ylabel(sample)
ax.set_yticks([])
ax.set_xticks([])
def Express_Swith(Epipath,chrom,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrom','start','end','sign']
Expressdf = Expressdf[Expressdf['chrom']==chrom]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_epigenetic(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
recs = ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(float(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=5)
ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=8)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_epigenetic2(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
x = np.linspace(start,end,int(len(markdf)/8))
a_BSpline=make_interp_spline(markdf['start'],markdf['sign'],k=3)
y_new=a_BSpline(x)
ax.plot(x, y_new, color=color,linewidth=2)
ax.fill_between(x,y_new ,0,facecolor=color,linewidth=0,label=sample)
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,4)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def draw_RNA(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
#print (markdf.head())
markdf=markdf[markdf['chrs']==chrom]
markdf=markdf[markdf['start']>start]
markdf=markdf[markdf['end']<end]
ax.tick_params(left='on',right='off',top='off',bottom='on')
markdf['width'] = markdf['end'] - markdf['start']
    vectorf = np.vectorize(float)  # np.float / np.int aliases were removed in recent NumPy releases
    vectori = np.vectorize(int)
starts=vectori(markdf['start'])
hight=vectorf(markdf['sign'])
width=vectori(markdf['width'])
ax.bar(x=starts,height=hight,bottom=0,width=width,color=color,linewidth=0,align='edge')
if MaxYlim == 'None':
ymaxlim = markdf['sign'].max()
yminlim = markdf['sign'].min()
else:
ymaxlim = float(MaxYlim)
yminlim = float(mins)
ax.set_xlim([start,end])
ax.set_ylim([yminlim,ymaxlim])
xts = np.linspace(start,end,5)
yts = np.linspace(yminlim,ymaxlim,2)
xtkls = ['{:,}'.format(int(i)) for i in xts]
ytkls = ['{:,}'.format(int(j)) for j in yts]
ax.tick_params(direction='out',pad=1)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(0)
ax.spines['top'].set_linewidth(0)
ax.set_yticks(yts)
ax.set_yticklabels(ytkls,fontsize=12)
ax.text(-0.11,0.4,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
#ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
if type =='bottom':
ax.set_xticks(xts)
ax.set_xticklabels(xtkls,fontsize=12)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('k')
ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
else:
ax.set_xticks([])
ax.set_xticklabels('')
markdf = pd.DataFrame()
gc.collect()
def Express_Swith(Epipath,chrs,start,end):
Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
Expressdf.columns = ['chrs','start','end','sign']
Expressdf = Expressdf[Expressdf['chrs']==chrs]
Expressdf = Expressdf[Expressdf['start']>=int(start)]
Expressdf = Expressdf[Expressdf['end']<=int(end)]
Expressdf = Expressdf.reset_index(drop=True)
return Expressdf
def draw_diff_epigenetic(file1,file2,ax,chrs,start,end,color,MaxYlim,MinYlim,type):
df1=Express_Swith(file1,chrs,start,end)
df2=Express_Swith(file2,chrs,start,end)
markdf = | pd.merge(df1,df2,on='start',how='inner') | pandas.merge |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 23:22:49 2021
@author: maita
"""
# load libraries
import pandas as pd
import os, re
import datetime as dt
# Which year?
jahr = "2021"
# define paths
workingdir = "/mnt/c/Users/maita.schade/Nextcloud/Documents/Work/Gap_Map/"
# workingdir = "/home/maita/Nextcloud/Documents/Work/Gap_Map/"
rawdir = workingdir + "raw/"
rawdatadir = rawdir + "gtfs/" + jahr + "/"
outdir = workingdir + "out/"+jahr+"/"
def getRouteShortNames(scope):
# Relying on pre-separated routes file in raw directory
# takes scope prefix and gets short_names to filter for
# !!! This only works for Fernverkehr!!!
print("Scope for routes: "+ scope)
routespath = [s for s in os.listdir(rawdatadir) if re.search("routes_"+scope, s) ][0]
routes_df = pd.read_csv(rawdatadir + routespath)
routenames = routes_df.route_short_name.unique()
# routeids = routes_df.route_id.unique()
return(routenames)
def filterByRoute(stop_times_df, routenames, routes_path = rawdatadir + "routes.txt", trips_path = rawdatadir + "trips.txt"):
# Given a list of route_short_names included in scope
# relying on or taking routes and trips in rawdatadir
# takes stop_times and filters them to include only stops made on routes included in scope
print("Filtering routes...")
routes_df = pd.read_csv(routes_path, usecols = ["route_short_name", "route_id"])
trips_df = pd.read_csv(trips_path, usecols = ['route_id', "trip_id"])
stop_times_filtered = stop_times_df.merge(
trips_df.merge(
routes_df[routes_df["route_short_name"].isin(routenames)][["route_id"]], # which routes are ok?
how="right")[["trip_id"]], # which trips are on those routes?
how="right" # which stops were made on those trips?
)
print("Total stops: ", len(stop_times_filtered))
return(stop_times_filtered)
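# Hedged example (the scope prefix "fern" is an assumption; use whatever prefix your
# pre-separated routes_<scope> files in rawdatadir actually carry):
#   routenames = getRouteShortNames("fern")
#   stop_times = filterByRoute(readStopTimes(rawdatadir), routenames)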
def countPerStop(stop_times_df):
# collapses DataFrame of stop_times to counts per stop_id
print("Counting stops")
if "days_count" in stop_times_df.columns:
return(stop_times_df.groupby('stop_id').sum().rename({"days_count":"n"},axis=1).reset_index()[['stop_id','n']])
else:
return(stop_times_df.groupby('stop_id').count().rename({"trip_id":"n"},axis=1).reset_index())
def addLocationsToStops(counts_df, stops_path = rawdatadir + "stops.txt"):
# Relying on or given path to file with stops and locations
# takes stop_time-counts DataFrame and enriches them with locations
# returns DataFrame of stops with stop information and counts
print("Adding locations to stops")
stops_df = pd.read_csv(stops_path)
return(stops_df.merge(counts_df, how="right", on = "stop_id"))
def readStopTimes(rawdatadir):
# helper function to read in necessary columns of stop_times file in rawdatadir
print("Loading " + rawdatadir)
return(pd.read_csv(rawdatadir + "stop_times.txt", usecols = ["stop_id","trip_id"]))
def interveningWeekdays(start, end, inclusive=True, weekdays=[0, 1, 2, 3, 4]):
# a useful function from Stackoverflow, to count particular weekdays in date range
if isinstance(start, dt.datetime):
start = start.date() # make a date from a datetime
if isinstance(end, dt.datetime):
end = end.date() # make a date from a datetime
if end < start:
# you can opt to return 0 or swap the dates around instead
# raise ValueError("start date must be before end date")
end, start = start, end
if inclusive:
end += dt.timedelta(days=1) # correct for inclusivity
try:
# collapse duplicate weekdays
weekdays = {weekday % 7 for weekday in weekdays}
except TypeError:
weekdays = [weekdays % 7]
ref = dt.date.today() # choose a reference date
ref -= dt.timedelta(days=ref.weekday()) # and normalize its weekday
# sum up all selected weekdays (max 7 iterations)
return sum((ref_plus - start).days // 7 - (ref_plus - end).days // 7
for ref_plus in
(ref + dt.timedelta(days=weekday) for weekday in weekdays))
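# Hedged examples for the helper above (dates chosen purely for illustration):
#   interveningWeekdays(dt.date(2021, 1, 4), dt.date(2021, 1, 10))                    # -> 5 (Mon-Fri)
#   interveningWeekdays(dt.date(2021, 1, 4), dt.date(2021, 1, 10), weekdays=[5, 6])   # -> 2 (Sat, Sun)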
def countDaysInIntervalHelper(calendarrow):
# function to find number of days of service operation based on calendars.txt-entry
servicedays = calendarrow[1:8].to_numpy().nonzero()[0].tolist()
startdate = dt.datetime.strptime(str(calendarrow.get("start_date")),"%Y%m%d")
enddate = dt.datetime.strptime(str(calendarrow.get("end_date")),"%Y%m%d")
# if enddate < startdate:
# print("switched start and end at ", calendarrow.get("service_id"))
return(interveningWeekdays(startdate, enddate, weekdays = servicedays))
def addFrequency(stop_times_df,
trips_path = rawdatadir + "trips.txt",
calendar_path = rawdatadir + "calendar.txt",
calendar_dates_path = rawdatadir + "calendar_dates.txt"):
# enriches stop_times DataFrame with information about how often in the feed
# period each stop is made
print("Getting number of service days for each stop_time")
# use service_id to find service...
# get regular service from calendar.txt
print("\t...reading regular service calendars")
calendar_df = pd.read_csv(calendar_path)
calendar_df["days_count"] = calendar_df.apply(countDaysInIntervalHelper, axis=1)
# and get exceptions from calendar_dates.txt
print("\t...reading calendar exceptions")
calendar_dates_df = pd.read_csv(calendar_dates_path)
print("\t...aggregating calendar")
calendar_df = calendar_dates_df.groupby(["service_id", "exception_type"], as_index=False
).count(
).pivot(index = "service_id", columns = "exception_type", values = "date"
).reset_index(
).merge(calendar_df, on="service_id", how="right")
print("\t...calculating total in calendar")
calendar_df.days_count= calendar_df.days_count + calendar_df[1].fillna(0) - calendar_df[2].fillna(0)
# use trip_id to look up associated trip
# from trip, look up service_id
print("\t...reading trips")
trips_df = | pd.read_csv(trips_path) | pandas.read_csv |
"""
Tests that work on both the Python and C engines but do not have a
specific classification into the other test modules.
"""
import csv
from io import StringIO
from pandas import DataFrame
import pandas._testing as tm
from pandas.io.parsers import TextParser
def test_read_data_list(all_parsers):
parser = all_parsers
kwargs = {"index_col": 0}
data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"
data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]
expected = parser.read_csv(StringIO(data), **kwargs)
with TextParser(data_list, chunksize=2, **kwargs) as parser:
result = parser.read()
tm.assert_frame_equal(result, expected)
def test_reader_list(all_parsers):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
parser = all_parsers
kwargs = {"index_col": 0}
lines = list(csv.reader(StringIO(data)))
with TextParser(lines, chunksize=2, **kwargs) as reader:
chunks = list(reader)
expected = parser.read_csv(StringIO(data), **kwargs)
| tm.assert_frame_equal(chunks[0], expected[:2]) | pandas._testing.assert_frame_equal |
import os
import glob
import scanpy as sc
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
import seaborn as sns
import matplotlib.pyplot as plt
import time
import datetime
import pickle
from scipy.stats import zscore
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from scipy.stats import mannwhitneyu, tiecorrect, rankdata
from statsmodels.stats.multitest import multipletests
# settings
plt.rc('font', size = 9)
plt.rc('font', family='sans serif')
plt.rcParams['pdf.fonttype']=42
plt.rcParams['ps.fonttype']=42
plt.rcParams['legend.frameon']=False
plt.rcParams['axes.grid']=False
plt.rcParams['legend.markerscale']=0.5
plt.rcParams['savefig.dpi']=600
sns.set_style("ticks")
pfp = '/home/ngr4/project/collabs/grants/czi_rp_2103/results/'
with open('/home/ngr4/project/collabs/grants/czi_rp_2103/data/processed/rpczi.pkl', 'rb') as f:
temp = pickle.load(f)
f.close()
adata = temp['adata']
# standard recipe
# sc.pp.combat(adata)
sc.tl.pca(adata)
sc.pp.neighbors(adata, n_neighbors=30, n_pcs=50)
sc.tl.umap(adata)
sc.tl.leiden(adata)
sc.pl.umap(adata, color=['leiden', 'source'])
# define markers & cell types
## TODO (before final polishes): add canonical cell type markers for human lung from Table S1 https://www.biorxiv.org/content/10.1101/742320v2.full.pdf
## REF: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5135277/ (most lung tissue markers come from here)
cell_markers = {'Basal': ['KRT5', 'DAPL1', 'TP63'],
'Basal (proliferating)': ['ADH7', 'MKI67', 'TOP2A', 'CDK1'],
'Hillock': ['KRT4', 'KRT13'],
'Club': [ 'KRT15', 'CD74','CXCL6'],
'Ciliated': ['FOXJ1', 'CCDC153', 'CCDC113', 'MLF1', 'LZTFL1','TUBB1','TP73','CCDC78'],
'Tuft': ['POU2F3', 'AVIL', 'MAFF','MIAT','NOS2'],
'Ionocyte': ['FOXI1', 'CFTR',], # 'ASCL3' not found
'Goblet': ['MUC5AC', 'MUC5B', 'SPDEF'],
'Epithelial':['ABCA3','LPCAT1','NAPSA','SFTPB','SFTPC','SLC34A2'],
'Neuroendocrine':['ACADSB','ADA','AFAP1','CPE'],
'Dendritic':['ITGAX','CCR7','CD1A','CD207'], # 'LY75' not found
# 'Macrophage':['CD68','CD14','CCL18','CD163'],
'Endothelial':['CD34','PECAM1','VWF'],
'Fibroblast':['THY1','CD36','PDGFRA','PTPN13'],
'Tcell':['CD3E','CD3D','CD3G','CD8A','CD8B','CD4'],
'Granulocyte':['CCR5','SMAD1','ITGAM'],
# 'Alveolar':['SLC34A2','ABCA3','CD44'],
'AT1':['SLC34A2','ABCA3','CD44','AGER','PDPN','CLIC5'],
'AT2':['SLC34A2','ABCA3','CD44','SFTPB','SFTPC','SFTPD','MUC1'],
'Myofibroblast':['ACTA2'],
'Monocyte':['CD36','CD14','CD68'],
'NK':['NCR1'],
'Progenitor':['TM4SF1','CEACAM6'],
# 'Neutrophil':['S100A9','S100A8','S100A12','VCAN','FCN1',
# 'CSTA','TSPO','CD14','MNDA','CTSD','PLBD1'], # from Tianyang (Iwasaki lab) ORIGINAL
# updated 051820
'Eosinophil':['RNASE2','LGALS1','RETN','AC020656.1', # 'RNASE3' not found
'H1FX','SLC44A1','AL355922.1','RFLNB','SERPINB10'], # from Tianyang (Iwasaki lab) ORIGINAL
# 'Macrophage':['S100A9','S100A8','FCGR3A','CD14','CD68','FCGR1A','MARCO','MSR1','MRC1','C1QB','C1QA','FABP4','APOC1','APOE','PPARG'],
# 'Monocyte':['S100A9','S100A8','FCGR3A','CD14','CD68','FCGR1A','RNASE2','RNASE3','FCN1','TNFRSF1B','S100A12','VCAN','CCR2','SDS'],
# 'Monocyte':['CCR2', 'FCN1', 'RNASE2', 'RNASE3', 'S100A12', 'SDS', 'TNFRSF1B', 'VCAN'], # no overlap btw Macrophage/Monocyte/Neutrophil
'Monocyte':['CCR2', 'FCN1', 'RNASE2', 'S100A12', 'SDS', 'TNFRSF1B', 'VCAN'],
'Macrophage':['APOC1', 'APOE', 'C1QA', 'C1QB', 'FABP4', 'MARCO', 'MRC1', 'MSR1', 'PPARG'], # no overlap btw Macrophage/Monocyte/Neutrophil
'Neutrophil':['CEACAM1', 'CEACAM8', 'CSF3R', 'CXCR1', 'CXCR2', 'FCGR3B'], # no overlap btw Macrophage/Monocyte/Neutrophil
# 'Neutrophil':['S100A9','S100A8','FCGR3A','CEACAM8','CXCR1','CXCR2','CEACAM1','FCGR3B','CSF3R'],
# 'Eosinophil':['RNASE2','RNASE3','IL5RA','CCR3','EPX','PRG2','PRG3','PTGDR2','SIGLEC8','GATA2'], # don't use RNASE2/3 since they overlap
# 'Eosinophil':['IL5RA','CCR3','PRG2','PTGDR2','SIGLEC8','GATA2'], # don't use RNASE2/3 since they overlap
# 'Eosinophil':['IL5RA','CCR3','PRG2','PTGDR2','SIGLEC8','GATA2', 'EPO','CD9','RNASE3','RETN','H1FX','RFLNB'], # added EPO and CD9 <>
}
# subset data to markers
genes = [g for k,v in cell_markers.items() for g in v]
x = pd.DataFrame(adata[:,genes].X, columns=genes)
x['cluster'] = adata.obs['leiden'].to_list()
add_pcs = True
if add_pcs:
# add PCs?
pcs = ['PC1','PC2']
for i,pc in enumerate(pcs):
x[pc] = adata.obsm['X_pca'][:,i]
genes = genes + pcs
# standard scale
x.loc[:,genes] = zscore(x.loc[:,genes])
results = pd.DataFrame()
fname = 'covid3balfs'
verbose = True
tic = time.time()
counter = 0
ORthreshold = 0.9
total_iter = len(cell_markers.keys())*len(x['cluster'].unique())
new_markers = {}
print('Lasso logistic regression')
for i,ctype in enumerate(cell_markers.keys()):
for j,cluster in enumerate(x['cluster'].unique()):
if verbose:
if counter % 50 == 0 and counter != 0:
p_through = counter / total_iter
toc = time.time() - tic
print(' through {:.1f}-% in {:.2f}-s\t~{:.2f}-s remain'.format(100*p_through,toc,(toc/counter)*(total_iter-counter)))
# binarize & subset
y = (x['cluster']==cluster).astype(int)
if add_pcs:
X = x.loc[:,cell_markers[ctype]+pcs]
else:
X = x.loc[:,cell_markers[ctype]]
# run default params (could add CV)
## results, solver='saga', time for ~25k cells: >>1min
## results, solver='lbfgs', time for ~25k cells: 14s
## results, solver='liblinear', time for ~25k cells: 25s
model = LogisticRegression(max_iter=10000,
penalty='l1',
tol=1e-6,
solver='liblinear') #n_jobs=-1 doesn't work for liblinear
model.fit(X, y)
status = 'OK'
if any(np.exp(model.coef_)[0][:-len(pcs)] < ORthreshold):
markers = [marker for i,marker in enumerate(cell_markers[ctype]) if i not in np.where(np.exp(model.coef_)[0][:-len(pcs)]<0.9)[0]]
if len(markers) != 0:
new_markers[ctype] = markers
if add_pcs:
X = x.loc[:,markers+pcs]
else:
X = x.loc[:,markers]
model = LogisticRegression(max_iter=10000,
penalty='l1',
tol=1e-6,
solver='liblinear') #n_jobs=-1 doesn't work for liblinear
model.fit(X, y)
else:
status = 'No markers with ORs >= {}'.format(ORthreshold)
else:
markers = cell_markers[ctype]
p1 = model.predict_proba(X)[:,1]
fpr, tpr, thresholds = metrics.roc_curve(y, p1)
optimal_idx = np.argmax(tpr-fpr)
optimal_threshold = thresholds[optimal_idx]
optimal_pred = (p1>optimal_threshold).astype(int)
precision,recall,_ = metrics.precision_recall_curve(y, p1)
auprc = metrics.auc(recall, precision)
auroc = metrics.roc_auc_score(y,p1)
ap = metrics.average_precision_score(y,p1)
bs = metrics.brier_score_loss(y,p1)
acc = metrics.accuracy_score(y,optimal_pred)
# store results
dt = pd.DataFrame({'ctype2pred':ctype,
'cluster':cluster,
'auroc':auroc,
'status':status,
'markers':[markers],
'ORs':np.exp(model.coef_).tolist(),
'ave_prec':ap,
'acc':acc,
'sensitivity':tpr[optimal_idx],
'specificity':1-fpr[optimal_idx]},
index=[0])
results = results.append(dt, ignore_index=True)
counter += 1
print('Classifiers done. Saving and plotting...')
top_per_ctype = pd.DataFrame()
top_n = 3
for ctype in results['ctype2pred'].unique():
dt = results.loc[results['ctype2pred']==ctype,:]
dt = dt.sort_values(by='auroc', ascending=False)
top_per_ctype = top_per_ctype.append(dt.iloc[0:top_n,:], ignore_index=True)
top_per_cluster = pd.DataFrame()
top_n = 3
for cluster in results['cluster'].unique():
dt = results.loc[results['cluster']==cluster,:]
dt = dt.sort_values(by='auroc', ascending=False)
top_per_cluster = top_per_cluster.append(dt.iloc[0:top_n,:], ignore_index=True)
if True:
top_per_cluster.to_csv(os.path.join(pfp,'top_ctype_per_cluster_{}.csv'.format(fname)))
# plot init annotation
## taking top ctype per cluster
top1_per_cluster = | pd.DataFrame() | pandas.DataFrame |
import time
import random
import numpy as np
import pandas as pd
import hdbscan
import sklearn.datasets
from sklearn import metrics
from classix import CLASSIX
from sklearn.cluster import KMeans
from sklearn.cluster import DBSCAN
from sklearn import preprocessing
from tqdm import tqdm
from sklearn.cluster import MeanShift
from quickshift.QuickshiftPP import *
import matplotlib.pyplot as plt
from tqdm import tqdm
import seaborn as sns
plt.style.use('bmh')
seed = 0
np.random.seed(seed)
random.seed(seed)
def test_kmeanspp_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
kmeans = KMeans(n_clusters=i, init='k-means++', random_state=1)
kmeans.fit(X)
ri = metrics.adjusted_rand_score(y, kmeans.labels_)
mi = metrics.adjusted_mutual_info_score(y, kmeans.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_meanshift_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
meanshift = MeanShift(bandwidth=i)
meanshift.fit(X)
ri = metrics.adjusted_rand_score(y, meanshift.labels_)
mi = metrics.adjusted_mutual_info_score(y, meanshift.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_dbscan_labels(X=None, y=None, _range=np.arange(0.05, 0.505, 0.005), minPts=5):
ar = list()
am = list()
for i in _range:
dbscan = DBSCAN(eps=i, n_jobs=1, min_samples=minPts)
dbscan.fit(X)
ri = metrics.adjusted_rand_score(y, dbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, dbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_hdbscan_labels(X=None, y=None, _range=np.arange(2, 21, 1)):
ar = list()
am = list()
for i in _range:
_hdbscan = hdbscan.HDBSCAN(min_cluster_size=int(i), algorithm='best')
_hdbscan.fit(X)
ri = metrics.adjusted_rand_score(y, _hdbscan.labels_)
mi = metrics.adjusted_mutual_info_score(y, _hdbscan.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def test_quickshiftpp_labels(X=None, y=None, _range=np.arange(2, 17, 1), beta=0.3):
ar = list()
am = list()
for i in _range:
quicks = QuickshiftPP(k=i, beta=beta)
quicks.fit(X.copy(order='C'))
ri = metrics.adjusted_rand_score(y, quicks.memberships)
mi = metrics.adjusted_mutual_info_score(y, quicks.memberships)
ar.append(ri)
am.append(mi)
return ar, am
def test_classix_radius_labels(X=None, y=None, method=None, minPts=1, sorting='pca', _range=np.arange(0.05, 0.3, 0.005)):
ar = list()
am = list()
for i in _range:
classix = CLASSIX(radius=i, minPts=minPts, post_alloc=True, sorting=sorting,
group_merging=method, verbose=0)
classix.fit(X)
ri = metrics.adjusted_rand_score(y, classix.labels_)
mi = metrics.adjusted_mutual_info_score(y, classix.labels_)
ar.append(ri)
am.append(mi)
return ar, am
def run_sensitivity_test(datasets, _range, clustering='CLASSIX (density)', fix_k=1, sorting='pca', label_files=None):
np.random.seed(1)
X, y = datasets[0], datasets[1]
nonans = np.isnan(X).sum(1) == 0
X = X[nonans,:]
y = y[nonans]
X = (X - X.mean(axis=0)) / X.std(axis=0)
if clustering == 'CLASSIX (density)':
ari, ami = test_classix_radius_labels(X=X, y=y, method='density', minPts=fix_k, sorting=sorting, _range=_range)
elif clustering == 'CLASSIX (distance)':
ari, ami = test_classix_radius_labels(X=X, y=y, method='distance', minPts=fix_k, sorting=sorting, _range=_range)
elif clustering == 'HDBSCAN':
ari, ami = test_hdbscan_labels(X=X, y=y, _range=_range)
elif clustering == 'DBSCAN':
ari, ami = test_dbscan_labels(X=X, y=y, _range=_range, minPts=fix_k)
elif clustering == 'Quickshift++':
ari, ami = test_quickshiftpp_labels(X=X, y=y, _range=_range, beta=fix_k)
elif clustering == 'k-means++':
ari, ami = test_kmeanspp_labels(X=X, y=y, _range=_range)
elif clustering == 'Meanshift':
ari, ami = test_meanshift_labels(X=X, y=y, _range=_range)
else:
        raise ValueError('Specify a concrete clustering algorithm.')
store_df = pd.DataFrame()
store_df['Range'] = _range
store_df['ARI'] = ari
store_df['AMI'] = ami
store_df.to_csv('results/exp5/{}'.format(label_files)+clustering+'.csv', index=False)
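# Hedged usage sketch (arrays as prepared in params_search() below; the range mirrors the DBSCAN default above):
#   run_sensitivity_test((X_banknote, y_banknote), np.arange(0.05, 0.505, 0.005),
#                        clustering='DBSCAN', fix_k=5, label_files='Banknote')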
def visualize_params_global():
plt.style.use('default')
datasets = ['Banknote', 'Dermatology', 'Ecoli', 'Glass', 'Iris', 'Phoneme', 'WheatSeeds', 'Wine']
algorithms = ['Meanshift', 'DBSCAN', 'HDBSCAN', 'Quickshift++', 'CLASSIX (distance)', 'CLASSIX (density)']
plot_num = 1
fontsize = 60
band = [0.5, 0.01, 0.5, 0.5, 0.015, 0.015]
plt.figure(figsize=(8.5*len(datasets), 9*len(algorithms)))
for data in datasets:
i = 0
for algorithm in algorithms:
store_df = pd.read_csv('results/exp5/{}'.format(data)+algorithm+'.csv')
_range = store_df['Range'].values
ars = store_df['ARI'].values
ami = store_df['AMI'].values
plt.rcParams['axes.facecolor'] = 'white'
plt.subplot(len(datasets), len(algorithms), plot_num)
plt.plot(_range, ars, label='ARI', marker='o', markersize=20, c='red')
plt.plot(_range, ami, label='AMI', marker='*', markersize=18, c='darkorange')
plt.ylim(-.05, 1.05)
plt.xticks([min(_range), max(_range)])
plt.yticks([0.5, 1])
plt.xlim(-band[i]+min(_range), band[i]+max(_range))
if plot_num == len(algorithms):
plt.legend(fontsize=fontsize, ncol=2, bbox_to_anchor=(1, 1.5))
plt.tick_params(axis='both', labelsize=fontsize)
plt.grid(True)
plt.subplots_adjust(bottom=0.01, left=0.01, right=0.99, top=0.99, wspace=0.15, hspace=0.15)
plot_num = plot_num + 1
i = i + 1
plt.tight_layout()
plt.savefig('results/exp5/ARI_AMI_PARAMS.pdf', bbox_inches='tight')
def visualize_params(_range, clustering='CLASSIX (Density)', label_files=None, band=0.01, fig_interval=1):
# sns.set(font_scale=5)
store_df = pd.read_csv('results/exp5/{}'.format(label_files)+clustering+'.csv')
_range = store_df['Range'].values
ami = store_df['AMI'].values
ari = store_df['ARI'].values
plt.figure(figsize=(6, 3.6))
plt.rcParams['axes.facecolor'] = 'white'
# plt.rc('font', family='serif')
plt.plot(_range, ari, label='ARI',
marker='o', markersize=10, c='red')
plt.plot(_range, ami, label='AMI',
marker='*', markersize=8, c='darkorange')
plt.legend(fontsize=32, fancybox=True, loc='best')
plt.ylim(-.05, 1.05)
# plt.xticks(np.arange(min(_range), max(_range)+1, fig_interval))
plt.xticks([min(_range), max(_range)])
plt.yticks([0, 0.5, 1])
plt.xlim(-band+min(_range), band+max(_range))
plt.tick_params(axis='both', labelsize=32)
plt.savefig('results/exp5/{}'.format(label_files)+clustering+'.pdf', bbox_inches='tight')
# plt.show()
def params_search():
datasets = []
data = pd.read_csv('data/Real_data/Banknote_authentication.csv')
X_banknote = data.drop(['4'],axis=1).values
y_banknote = data['4'].values
# print("Shape of banknote data: ", data.shape, ", labels: ", len(set(y_banknote)))
datasets.append((X_banknote, y_banknote))
data = pd.read_csv("data/Real_data/Dermatology.csv").values
X_dermatology = data[:, :data.shape[1]-1]
y_dermatology = data[:, data.shape[1]-1]
# print("Shape of Dermatology data: ", data.shape, ", labels: ", len(set(y_dermatology)))
datasets.append((X_dermatology, y_dermatology))
data = | pd.read_csv("data/Real_data/Ecoli.csv") | pandas.read_csv |
def setup_fs(s3, key="", secret="", endpoint="", cert="", passwords={}):
"""Given a boolean specifying whether to use local disk or S3, setup filesystem
Syntax examples: AWS (http://s3.us-east-2.amazonaws.com), MinIO (http://192.168.0.1:9000)
The cert input is relevant if you're using MinIO with TLS enabled, for specifying the path to the certficiate.
The block_size is set to accomodate files up to 55 MB in size. If your log files are larger, adjust this value accordingly
"""
if s3:
import s3fs
block_size = 55 * 1024 * 1024
if "amazonaws" in endpoint:
fs = s3fs.S3FileSystem(key=key, secret=secret, default_block_size=block_size)
elif cert != "":
fs = s3fs.S3FileSystem(
key=key,
secret=secret,
client_kwargs={"endpoint_url": endpoint, "verify": cert},
default_block_size=block_size,
)
else:
fs = s3fs.S3FileSystem(
key=key,
secret=secret,
client_kwargs={"endpoint_url": endpoint},
default_block_size=block_size,
)
else:
from pathlib import Path
import canedge_browser
base_path = Path(__file__).parent
fs = canedge_browser.LocalFileSystem(base_path=base_path, passwords=passwords)
return fs
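# Hedged usage examples (keys, secrets and endpoints below are placeholders):
#   fs = setup_fs(s3=False)                                    # local disk via canedge_browser
#   fs = setup_fs(s3=True, key="<key>", secret="<secret>",
#                 endpoint="http://192.168.0.1:9000")          # MinIO-style endpoint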
# -----------------------------------------------
def load_dbc_files(dbc_paths):
"""Given a list of DBC file paths, create a list of conversion rule databases"""
import can_decoder
from pathlib import Path
db_list = []
for dbc in dbc_paths:
db = can_decoder.load_dbc(Path(__file__).parent / dbc)
db_list.append(db)
return db_list
# -----------------------------------------------
def list_log_files(fs, devices, start_times, verbose=True, passwords={}):
"""Given a list of device paths, list log files from specified filesystem.
Data is loaded based on the list of start datetimes
"""
import canedge_browser, mdf_iter
log_files = []
if len(start_times):
for idx, device in enumerate(devices):
start = start_times[idx]
log_files_device = canedge_browser.get_log_files(fs, [device], start_date=start, passwords=passwords)
log_files.extend(log_files_device)
if verbose:
print(f"Found {len(log_files)} log files\n")
return log_files
def restructure_data(df_phys, res, full_col_names=False, pgn_names=False):
import pandas as pd
from J1939_PGN import J1939_PGN
df_phys_join = pd.DataFrame({"TimeStamp": []})
if not df_phys.empty:
for message, df_phys_message in df_phys.groupby("CAN ID"):
for signal, data in df_phys_message.groupby("Signal"):
pgn = J1939_PGN(int(message)).pgn
if full_col_names == True and pgn_names == False:
col_name = str(hex(int(message))).upper()[2:] + "." + signal
elif full_col_names == True and pgn_names == True:
col_name = str(hex(int(message))).upper()[2:] + "." + str(pgn) + "." + signal
elif full_col_names == False and pgn_names == True:
col_name = str(pgn) + "." + signal
else:
col_name = signal
df_phys_join = pd.merge_ordered(
df_phys_join,
data["Physical Value"].rename(col_name).resample(res).pad().dropna(),
on="TimeStamp",
fill_method="none",
).set_index("TimeStamp")
return df_phys_join
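# Hedged usage sketch: pivot the long-format decoded signals into a resampled wide table,
# e.g. df_wide = restructure_data(df_phys, res="1S")
# (assumes df_phys carries the "TimeStamp" datetime index produced by the decoder).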
def test_signal_threshold(df_phys, signal, threshold):
"""Illustrative example for how to extract a signal and evaluate statistical values
vs. defined thresholds. The function can be easily modified for your needs.
"""
df_signal = df_phys[df_phys["Signal"] == signal]["Physical Value"]
stats = df_signal.agg(["count", "min", "max", "mean", "std"])
delta = stats["max"] - stats["min"]
if delta > threshold:
print(f"{signal} exhibits a 'max - min' delta of {delta} exceeding threshold of {threshold}")
def add_custom_sig(df_phys, signal1, signal2, function, new_signal):
"""Helper function for calculating a new signal based on two signals and a function.
Returns a dataframe with the new signal name and physical values
"""
import pandas as pd
try:
s1 = df_phys[df_phys["Signal"] == signal1]["Physical Value"].rename(signal1)
s2 = df_phys[df_phys["Signal"] == signal2]["Physical Value"].rename(signal2)
df_new_sig = pd.merge_ordered(
s1,
s2,
on="TimeStamp",
fill_method="ffill",
).set_index("TimeStamp")
df_new_sig = df_new_sig.apply(lambda x: function(x[0], x[1]), axis=1).dropna().rename("Physical Value").to_frame()
df_new_sig["Signal"] = new_signal
df_phys = df_phys.append(df_new_sig)
except:
print(f"Warning: Custom signal {new_signal} not created\n")
return df_phys
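# Hedged example (signal names and the combining function are hypothetical):
#   ratio = lambda v1, v2: v1 / v2 if v2 else None
#   df_phys = add_custom_sig(df_phys, "WheelBasedVehicleSpeed", "EngineSpeed", ratio, "SpeedPerRpm")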
# -----------------------------------------------
class ProcessData:
def __init__(self, fs, db_list, signals=[], days_offset=None, verbose=True):
from datetime import datetime, timedelta
self.db_list = db_list
self.signals = signals
self.fs = fs
self.days_offset = days_offset
self.verbose = verbose
if self.verbose == True and self.days_offset != None:
date_offset = (datetime.today() - timedelta(days=self.days_offset)).strftime("%Y-%m-%d")
print(
f"Warning: days_offset = {self.days_offset}, meaning data is offset to start at {date_offset}.\nThis is intended for sample data testing only. Set days_offset = None when processing your own data."
)
return
def extract_phys(self, df_raw):
"""Given df of raw data and list of decoding databases, create new def with
physical values (no duplicate signals and optionally filtered/rebaselined)
"""
import can_decoder
import pandas as pd
df_phys = pd.DataFrame()
for db in self.db_list:
df_decoder = can_decoder.DataFrameDecoder(db)
df_phys_temp = pd.DataFrame()
for length, group in df_raw.groupby("DataLength"):
df_phys_group = df_decoder.decode_frame(group)
df_phys_temp = df_phys_temp.append(df_phys_group)
df_phys = df_phys.append(df_phys_temp.sort_index())
# remove duplicates in case multiple DBC files contain identical signals
df_phys["datetime"] = df_phys.index
df_phys = df_phys.drop_duplicates(keep="first")
df_phys = df_phys.drop(labels="datetime", axis=1)
# optionally filter and rebaseline the data
df_phys = self.filter_signals(df_phys)
if not df_phys.empty and type(self.days_offset) == int:
df_phys = self.rebaseline_data(df_phys)
return df_phys
def rebaseline_data(self, df_phys):
"""Given a df of physical values, this offsets the timestamp
to be equal to today, minus a given number of days.
"""
from datetime import datetime, timezone
import pandas as pd
delta_days = (datetime.now(timezone.utc) - df_phys.index.min()).days - self.days_offset
df_phys.index = df_phys.index + pd.Timedelta(delta_days, "day")
return df_phys
def filter_signals(self, df_phys):
"""Given a df of physical values, return only signals matched by filter"""
if not df_phys.empty and len(self.signals):
df_phys = df_phys[df_phys["Signal"].isin(self.signals)]
return df_phys
def get_raw_data(self, log_file, lin=False, passwords={}):
"""Extract a df of raw data and device ID from log file.
Optionally include LIN bus data by setting lin=True
"""
import mdf_iter
with self.fs.open(log_file, "rb") as handle:
mdf_file = mdf_iter.MdfFile(handle, passwords=passwords)
device_id = self.get_device_id(mdf_file)
if lin:
df_raw_lin = mdf_file.get_data_frame_lin()
df_raw_lin["IDE"] = 0
df_raw_can = mdf_file.get_data_frame()
df_raw = df_raw_can.append(df_raw_lin)
else:
df_raw = mdf_file.get_data_frame()
return df_raw, device_id
def get_device_id(self, mdf_file):
return mdf_file.get_metadata()["HDComment.Device Information.serial number"]["value_raw"]
def print_log_summary(self, device_id, log_file, df_phys):
"""Print summary information for each log file"""
if self.verbose:
print(
"\n---------------",
f"\nDevice: {device_id} | Log file: {log_file.split(device_id)[-1]} [Extracted {len(df_phys)} decoded frames]\nPeriod: {df_phys.index.min()} - {df_phys.index.max()}\n",
)
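    # Hedged end-to-end sketch (paths, DBC file, device folder and start time are placeholders):
    #   fs = setup_fs(s3=False)
    #   db_list = load_dbc_files(["my_decoding_rules.dbc"])
    #   proc = ProcessData(fs, db_list, signals=[])
    #   for log_file in list_log_files(fs, ["LOG/958D2219"], [start_dt]):
    #       df_raw, device_id = proc.get_raw_data(log_file)
    #       df_phys = proc.extract_phys(df_raw)
    #       proc.print_log_summary(device_id, log_file, df_phys)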
# -----------------------------------------------
class MultiFrameDecoder:
"""BETA class for handling transport protocol data. For each response ID, identify
sequences of subsequent frames and combine the relevant parts of the data payloads
into a single payload with the response ID as the ID. The original raw dataframe is
then cleansed of the original response ID sequence frames. Instead, the new concatenated
frames are inserted. Further, the class supports DBC decoding of the resulting modified raw data
:param tp_type: the class supports UDS ("uds"), NMEA 2000 Fast Packets ("nmea") and J1939 ("j1939")
:param df_raw: dataframe of raw CAN data from the mdf_iter module
SINGLE_FRAME_MASK: mask used in matching single frames
FIRST_FRAME_MASK: mask used in matching first frames
    CONSEQ_FRAME_MASK: mask used in matching consecutive frames
SINGLE_FRAME: frame type reflecting a single frame response
FIRST_FRAME: frame type reflecting the first frame in a multi frame response
    CONSEQ_FRAME: frame type reflecting a consecutive frame in a multi frame response
ff_payload_start: the combined payload will start at this byte in the FIRST_FRAME
bam_pgn: this is used in J1939 and marks the initial BAM message ID in DEC
res_id_list_hex: TP 'response CAN IDs' to process. For nmea/j1939, these are provided by default
"""
def __init__(self, tp_type=""):
frame_struct_uds = {
"SINGLE_FRAME_MASK": 0xF0,
"FIRST_FRAME_MASK": 0xF0,
"CONSEQ_FRAME_MASK": 0xF0,
"SINGLE_FRAME": 0x00,
"FIRST_FRAME": 0x10,
"CONSEQ_FRAME": 0x20,
"ff_payload_start": 2,
"bam_pgn": -1,
"res_id_list_hex": [
"0x7A8",
"0x7E0",
"0x7E9",
"0x7EA",
"0x7EB",
"0x7EC",
"0x7ED",
"0x7EE",
"0x7EF",
"0x7EA",
"0x7BB",
"0x7C8",
"0x7CE",
],
}
frame_struct_j1939 = {
"SINGLE_FRAME_MASK": 0xFF,
"FIRST_FRAME_MASK": 0xFF,
"CONSEQ_FRAME_MASK": 0x00,
"SINGLE_FRAME": 0xFF,
"FIRST_FRAME": 0x20,
"CONSEQ_FRAME": 0x00,
"ff_payload_start": 8,
"bam_pgn": int("0xEC00", 16),
"res_id_list_hex": ["0xEB00"],
}
frame_struct_nmea = {
"SINGLE_FRAME_MASK": 0xFF,
"FIRST_FRAME_MASK": 0x1F,
"CONSEQ_FRAME_MASK": 0x00,
"SINGLE_FRAME": 0xFF,
"FIRST_FRAME": 0x00,
"CONSEQ_FRAME": 0x00,
"ff_payload_start": 2,
"bam_pgn": -1,
"res_id_list_hex": [
"0xfed8",
"0x1f007",
"0x1f008",
"0x1f009",
"0x1f014",
"0x1f016",
"0x1f101",
"0x1f105",
"0x1f201",
"0x1f208",
"0x1f209",
"0x1f20a",
"0x1f20c",
"0x1f20f",
"0x1f210",
"0x1f212",
"0x1f513",
"0x1f805",
"0x1f80e",
"0x1f80f",
"0x1f810",
"0x1f811",
"0x1f814",
"0x1f815",
"0x1f904",
"0x1f905",
"0x1fa04",
"0x1fb02",
"0x1fb03",
"0x1fb04",
"0x1fb05",
"0x1fb11",
"0x1fb12",
"0x1fd10",
"0x1fe07",
"0x1fe12",
"0x1ff14",
"0x1ff15",
],
}
if tp_type == "uds":
self.frame_struct = frame_struct_uds
elif tp_type == "j1939":
self.frame_struct = frame_struct_j1939
elif tp_type == "nmea":
self.frame_struct = frame_struct_nmea
else:
self.frame_struct = {}
self.tp_type = tp_type
return
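    # Hedged usage sketch (not from the original source):
    #   tp = MultiFrameDecoder("j1939")
    #   df_raw_combined = tp.combine_tp_frames(df_raw)   # reassembles multi-frame TP sequences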
def calculate_pgn(self, frame_id):
pgn = (frame_id & 0x03FFFF00) >> 8
pgn_f = (pgn & 0xFF00) >> 8
pgn_s = pgn & 0x00FF
if pgn_f < 240:
pgn &= 0xFFFFFF00
return pgn
def calculate_sa(self, frame_id):
sa = frame_id & 0x000000FF
return sa
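    # Worked example (hedged): for the 29-bit ID 0x18EBFF00, calculate_pgn() yields
    # PGN 0xEB00 (the J1939 TP.DT message) because the PDU format byte 0xEB < 240
    # masks out the destination byte, and calculate_sa() returns source address 0x00.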
def construct_new_tp_frame(self, base_frame, payload_concatenated, can_id):
new_frame = base_frame
new_frame.at["DataBytes"] = payload_concatenated
new_frame.at["DLC"] = 0
new_frame.at["DataLength"] = len(payload_concatenated)
if can_id:
new_frame.at["ID"] = can_id
return new_frame
def combine_tp_frames(self, df_raw):
import pandas as pd
bam_pgn = self.frame_struct["bam_pgn"]
res_id_list = [int(res_id, 16) for res_id in self.frame_struct["res_id_list_hex"]]
df_list_combined = []
# use PGN matching for J1939 and NMEA and update res_id_list to relevant entries
if self.tp_type == "nmea" or self.tp_type == "j1939":
res_id_list_incl_bam = res_id_list
res_id_list_incl_bam.append(bam_pgn)
df_raw_match = df_raw["ID"].apply(self.calculate_pgn).isin(res_id_list_incl_bam)
res_id_list = df_raw["ID"][df_raw_match].apply(self.calculate_pgn).drop_duplicates().values.tolist()
df_raw_tp = df_raw[df_raw_match]
df_raw_excl_tp = df_raw[~df_raw_match]
else:
df_raw_match = df_raw["ID"].isin(res_id_list)
res_id_list = df_raw["ID"][df_raw_match].drop_duplicates().values.tolist()
df_raw_tp = df_raw[df_raw_match]
df_raw_excl_tp = df_raw[~df_raw["ID"].isin(res_id_list)]
if len(df_raw.index) - len(df_raw_tp.index) - len(df_raw_excl_tp.index):
print("Warning - total rows does not equal sum of rows incl/excl transport protocol frames")
df_list_combined.append(df_raw_excl_tp)
for res_id in res_id_list:
# filter raw data for response ID and extract a 'base frame'
if self.tp_type == "nmea" or self.tp_type == "j1939":
df_raw_res_id = df_raw_tp[df_raw_tp["ID"].apply(self.calculate_pgn).isin([res_id, bam_pgn])]
df_raw_res_id = df_raw_res_id.copy()
df_raw_res_id["SA"] = df_raw_res_id.ID.apply(self.calculate_sa)
else:
df_raw_res_id = df_raw_tp[df_raw_tp["ID"].isin([res_id])]
if df_raw_res_id.empty:
continue
for channel, df_channel in df_raw_res_id.groupby("BusChannel"):
# if J1939, we can't group by CAN ID (as we need both bam_pgn and response)
if self.tp_type == "j1939":
group = "SA"
else:
group = "ID"
for identifier, df_raw_filter in df_channel.groupby(group):
base_frame = df_raw_filter.iloc[0]
frame_list = []
frame_timestamp_list = []
payload_concatenated = []
ff_length = 0xFFF
can_id = None
conseq_frame_prev = None
# iterate through rows in filtered dataframe
for index, row in df_raw_filter.iterrows():
first_byte = row["DataBytes"][0]
# check if first frame (either for UDS/NMEA or J1939 case)
if self.tp_type == "j1939" and bam_pgn == self.calculate_pgn(row["ID"]):
first_frame_test = True
elif (first_byte & self.frame_struct["FIRST_FRAME_MASK"]) == self.frame_struct["FIRST_FRAME"]:
first_frame_test = True
else:
first_frame_test = False
# if single frame, save frame directly (excl. 1st byte)
if self.tp_type != "nmea" and (first_byte & self.frame_struct["SINGLE_FRAME_MASK"] == self.frame_struct["SINGLE_FRAME"]):
new_frame = self.construct_new_tp_frame(base_frame, row["DataBytes"], row["ID"])
frame_list.append(new_frame.values.tolist())
frame_timestamp_list.append(index)
# if first frame, save info from prior multi frame response sequence,
# then initialize a new sequence incl. the first frame payload
elif first_frame_test:
# create a new frame using information from previous iterations
if len(payload_concatenated) >= ff_length:
new_frame = self.construct_new_tp_frame(base_frame, payload_concatenated, can_id)
frame_list.append(new_frame.values.tolist())
frame_timestamp_list.append(frame_timestamp)
# reset and start on next frame
payload_concatenated = []
conseq_frame_prev = None
frame_timestamp = index
# for J1939, extract PGN and convert to 29 bit CAN ID for use in baseframe
if self.tp_type == "j1939":
pgn_hex = "".join("{:02x}".format(x) for x in reversed(row["DataBytes"][5:8]))
pgn = int(pgn_hex, 16)
can_id = (6 << 26) | (pgn << 8) | 254
ff_length = (row["DataBytes"][0] & 0x0F) << 8 | row["DataBytes"][1]
for byte in row["DataBytes"][self.frame_struct["ff_payload_start"] :]:
payload_concatenated.append(byte)
                        # if consecutive frame, extend payload with payload excl. 1st byte
elif first_byte & self.frame_struct["CONSEQ_FRAME_MASK"] == self.frame_struct["CONSEQ_FRAME"]:
if (conseq_frame_prev == None) or ((first_byte - conseq_frame_prev) == 1):
conseq_frame_prev = first_byte
for byte in row["DataBytes"][1:]:
payload_concatenated.append(byte)
df_raw_res_id_new = | pd.DataFrame(frame_list, columns=base_frame.index, index=frame_timestamp_list) | pandas.DataFrame |
import os
import torch
import pickle
import collections
import math
import pandas as pd
import numpy as np
import networkx as nx
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import AllChem
from rdkit import DataStructs
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect
from torch.utils import data as torch_data
from torch_geometric.data import Data
from torch_geometric.data import InMemoryDataset
from torch_geometric.data import Batch
from itertools import repeat, product, chain
assert "DATASET_ROOT" in os.environ, "Environment variable DATASET_ROOT must be set"
# %% Atom and bond features
# Allowable node and edge features
allowable_features = {
'possible_atomic_num_list' : list(range(1, 119)),
'possible_formal_charge_list' : [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5],
'possible_chirality_list' : [
Chem.rdchem.ChiralType.CHI_UNSPECIFIED,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW,
Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW,
Chem.rdchem.ChiralType.CHI_OTHER
],
'possible_hybridization_list' : [
Chem.rdchem.HybridizationType.S,
Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,
Chem.rdchem.HybridizationType.SP3D2, Chem.rdchem.HybridizationType.UNSPECIFIED
],
'possible_numH_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8],
'possible_implicit_valence_list' : [0, 1, 2, 3, 4, 5, 6],
'possible_degree_list' : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'possible_bonds' : [
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC
],
'possible_bond_dirs' : [ # only for double bond stereo information
Chem.rdchem.BondDir.NONE,
Chem.rdchem.BondDir.ENDUPRIGHT,
Chem.rdchem.BondDir.ENDDOWNRIGHT
]
}
num_atom_feautres = 2
def get_atom_feature(atom):
feat = [allowable_features['possible_atomic_num_list'].index(
atom.GetAtomicNum())] + [allowable_features[
'possible_chirality_list'].index(atom.GetChiralTag())]
return feat
num_bond_features = 2
def get_bond_feature(bond):
feat = [allowable_features['possible_bonds'].index(bond.GetBondType())] + \
[allowable_features['possible_bond_dirs'].index(bond.GetBondDir())]
return feat
# %% Graph featurization: rdkit <=> tg.data <=> nx
def mol_to_graph_data_obj_simple(mol):
"""
Converts rdkit mol object to graph Data object required by the pytorch
geometric package. NB: Uses simplified atom and bond features, and represent
as indices
:param mol: rdkit mol object
:return: graph data object with the attributes: x, edge_index, edge_attr
"""
# atoms
atom_features_list = []
for atom in mol.GetAtoms():
atom_feature = get_atom_feature(atom)
atom_features_list.append(atom_feature)
x = torch.tensor(np.array(atom_features_list), dtype=torch.long)
# bonds
if len(mol.GetBonds()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for bond in mol.GetBonds():
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
edge_feature = get_bond_feature(bond)
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = torch.tensor(np.array(edge_features_list),
dtype=torch.long)
else: # mol has no bonds
edge_index = torch.empty((2, 0), dtype=torch.long)
edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)
return Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
def graph_data_obj_to_mol_simple(d):
"""
Convert pytorch geometric data obj to rdkit mol object. NB: Uses simplified
atom and bond features, and represent as indices.
:param: data_x:
:param: data_edge_index:
:param: data_edge_attr
:return:
"""
data_x = d.x.cpu().numpy()
data_edge_index = d.edge_index.cpu().numpy()
data_edge_attr = d.edge_attr.cpu().numpy()
mol = Chem.RWMol()
# atoms
atom_features = data_x.cpu().numpy()
num_atoms = atom_features.shape[0]
for i in range(num_atoms):
atomic_num_idx, chirality_tag_idx = atom_features[i][:2]
atomic_num = allowable_features['possible_atomic_num_list'][atomic_num_idx]
chirality_tag = allowable_features['possible_chirality_list'][chirality_tag_idx]
atom = Chem.Atom(atomic_num)
atom.SetChiralTag(chirality_tag)
mol.AddAtom(atom)
# bonds
edge_index = data_edge_index.cpu().numpy()
edge_attr = data_edge_attr.cpu().numpy()
num_bonds = edge_index.shape[1]
for j in range(0, num_bonds, 2):
begin_idx = int(edge_index[0, j])
end_idx = int(edge_index[1, j])
bond_type_idx, bond_dir_idx = edge_attr[j][:2]
bond_type = allowable_features['possible_bonds'][bond_type_idx]
bond_dir = allowable_features['possible_bond_dirs'][bond_dir_idx]
mol.AddBond(begin_idx, end_idx, bond_type)
# set bond direction
new_bond = mol.GetBondBetweenAtoms(begin_idx, end_idx)
new_bond.SetBondDir(bond_dir)
# Chem.SanitizeMol(mol) # fails for COC1=CC2=C(NC(=N2)[S@@](=O)CC2=NC=C(
# C)C(OC)=C2C)C=C1, when aromatic bond is possible
# when we do not have aromatic bonds
# Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
return mol
def graph_data_obj_to_nx_simple(d):
"""
Converts graph Data object required by the pytorch geometric package to
network x data object. NB: Uses simplified atom and bond features,
and represent as indices. NB: possible issues with recapitulating relative
stereochemistry since the edges in the nx object are unordered.
:param data: pytorch geometric Data object
:return: network x object
"""
G = nx.Graph()
# atoms
atom_features = d.x.cpu().numpy()
num_atoms = atom_features.shape[0]
for i in range(num_atoms):
atomic_num_idx, chirality_tag_idx = atom_features[i][:2]
G.add_node(i, atom_num_idx=atomic_num_idx, chirality_tag_idx=chirality_tag_idx)
pass
# bonds
edge_index = d.edge_index.cpu().numpy()
edge_attr = d.edge_attr.cpu().numpy()
num_bonds = edge_index.shape[1]
for j in range(0, num_bonds, 2):
begin_idx = int(edge_index[0, j])
end_idx = int(edge_index[1, j])
bond_type_idx, bond_dir_idx = edge_attr[j][:2]
if not G.has_edge(begin_idx, end_idx):
G.add_edge(begin_idx, end_idx, bond_type_idx=bond_type_idx,
bond_dir_idx=bond_dir_idx)
return G
def nx_to_graph_data_obj_simple(G):
"""
Converts nx graph to pytorch geometric Data object. Assume node indices
are numbered from 0 to num_nodes - 1. NB: Uses simplified atom and bond
features, and represent as indices. NB: possible issues with
recapitulating relative stereochemistry since the edges in the nx
object are unordered.
:param G: nx graph obj
:return: pytorch geometric Data object
"""
# atoms
atom_features_list = []
for _, node in G.nodes(data=True):
atom_feature = [node['atom_num_idx'], node['chirality_tag_idx']]
atom_features_list.append(atom_feature)
x = torch.tensor(np.array(atom_features_list), dtype=torch.long)
# bonds
num_bond_features = 2 # bond type, bond direction
if len(G.edges()) > 0: # mol has bonds
edges_list = []
edge_features_list = []
for i, j, edge in G.edges(data=True):
edge_feature = [edge['bond_type_idx'], edge['bond_dir_idx']]
edges_list.append((i, j))
edge_features_list.append(edge_feature)
edges_list.append((j, i))
edge_features_list.append(edge_feature)
# data.edge_index: Graph connectivity in COO format with shape [2, num_edges]
edge_index = torch.tensor(np.array(edges_list).T, dtype=torch.long)
# data.edge_attr: Edge feature matrix with shape [num_edges, num_edge_features]
edge_attr = torch.tensor(np.array(edge_features_list),
dtype=torch.long)
else: # mol has no bonds
edge_index = torch.empty((2, 0), dtype=torch.long)
edge_attr = torch.empty((0, num_bond_features), dtype=torch.long)
return Data(x=x, edge_index=edge_index, edge_attr=edge_attr)
# %% Miscellaneous functions
def split_rdkit_mol_obj(mol):
"""
Split rdkit mol object containing multiple species or one species into a
list of mol objects or a list containing a single object respectively
:param mol:
:return:
"""
smiles = AllChem.MolToSmiles(mol, isomericSmiles=True)
smiles_list = smiles.split('.')
mol_species_list = []
for s in smiles_list:
if check_smiles_validity(s):
mol_species_list.append(AllChem.MolFromSmiles(s))
return mol_species_list
def check_smiles_validity(smiles):
try:
m = Chem.MolFromSmiles(smiles)
if m:
return True
else:
return False
except:
return False
def get_largest_mol(mol_list):
"""
Given a list of rdkit mol objects, returns mol object containing the
largest num of atoms. If multiple containing largest num of atoms,
picks the first one
:param mol_list:
:return:
"""
num_atoms_list = [len(m.GetAtoms()) for m in mol_list]
largest_mol_idx = num_atoms_list.index(max(num_atoms_list))
return mol_list[largest_mol_idx]
def create_standardized_mol_id(smiles):
"""
:param smiles:
:return: inchi
"""
if check_smiles_validity(smiles):
# remove stereochemistry
smiles = AllChem.MolToSmiles(AllChem.MolFromSmiles(smiles),
isomericSmiles=False)
mol = AllChem.MolFromSmiles(smiles)
if mol != None:
if '.' in smiles: # if multiple species, pick largest molecule
mol_species_list = split_rdkit_mol_obj(mol)
largest_mol = get_largest_mol(mol_species_list)
inchi = AllChem.MolToInchi(largest_mol)
else:
inchi = AllChem.MolToInchi(mol)
return inchi
else:
return
else:
return
def to_list(x):
if not isinstance(x, (tuple, list)):
x = [x]
return x
# %% Main dataset class
class MoleculeDataset(InMemoryDataset):
def __init__(self,
root,
#data = None,
#slices = None,
transform=None,
pre_transform=None,
pre_filter=None,
dataset='zinc250k',
empty=False):
"""
Adapted from qm9.py. Disabled the download functionality
:param root: directory of the dataset, containing a raw and processed
dir. The raw dir should contain the file containing the smiles, and the
processed dir can either empty or a previously processed file
:param dataset: name of the dataset. Currently only implemented for
zinc250k, chembl_with_labels, tox21, hiv, bace, bbbp, clintox, esol,
freesolv, lipophilicity, muv, pcba, sider, toxcast
:param empty: if True, then will not load any data obj. For
initializing empty dataset
"""
self.dataset = dataset
self.root = root
super(MoleculeDataset, self).__init__(root, transform, pre_transform,
pre_filter)
self.transform, self.pre_transform, self.pre_filter = transform, pre_transform, pre_filter
if not empty:
self.data, self.slices = torch.load(self.processed_paths[0])
def get(self, idx):
d = Data()
for key in self.data.keys:
item, slices = self.data[key], self.slices[key]
s = list(repeat(slice(None), item.dim()))
s[d.__cat_dim__(key, item)] = slice(slices[idx], slices[idx + 1])
d[key] = item[s]
return d
@property
def raw_dir(self):
return os.path.join(self.root, 'raw')
@property
def processed_dir(self):
return os.path.join(self.root, 'processed')
@property
def raw_file_names(self):
file_name_list = os.listdir(self.raw_dir)
# assert len(file_name_list) == 1 # currently assume we have a
# # single raw file
return file_name_list
@property
def processed_file_names(self):
return 'geometric_data_processed.pt'
@property
def raw_paths(self):
r"""The filepaths to find in order to skip the download."""
files = to_list(self.raw_file_names)
return [os.path.join(self.raw_dir, f) for f in files]
@property
def processed_paths(self):
r"""The filepaths to find in the :obj:`self.processed_dir`
folder in order to skip the processing."""
files = to_list(self.processed_file_names)
return [os.path.join(self.processed_dir, f) for f in files]
def download(self):
raise NotImplementedError('Must indicate valid location of raw data. '
'No download allowed')
def batch_import_from_rdkit(self, rdkit_mol_objs, ids=None, labels=None):
if ids is None:
ids = np.arange(len(rdkit_mol_objs))
assert len(rdkit_mol_objs) == len(ids)
data_list = []
for i in range(len(ids)):
if i % 10000 == 0:
print("\tFeaturized %d molecules" % i)
d = mol_to_graph_data_obj_simple(rdkit_mol_objs[i])
d.id = torch.tensor([ids[i]])
if labels is not None:
d.y = torch.tensor(labels[i])
data_list.append(d)
return data_list
def process(self):
smiles_list = []
data_list = []
if self.dataset == 'zinc_standard_agent':
smiles_list, rdkit_mol_objs, _, ids = _load_zinc_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=ids, labels=None)
elif self.dataset == 'chembl_filtered':
smiles_list, rdkit_mol_objs, labels, folds = _load_chembl_with_labels_dataset(self.raw_dir)
valid_mol_idx = _filter_mols(smiles_list,
rdkit_mol_objs,
downstream=True,
test_only=True,
molecular_weight=True)
smiles_list = [smiles_list[i] for i in valid_mol_idx]
rdkit_mol_objs = [rdkit_mol_objs[i] for i in valid_mol_idx]
labels = labels[np.array(valid_mol_idx)]
folds = folds[np.array(valid_mol_idx)]
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
for i, d in enumerate(data_list):
d.fold = folds[i]
elif self.dataset == 'bbbp':
smiles_list, rdkit_mol_objs, labels = _load_bbbp_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
elif self.dataset == 'clintox':
smiles_list, rdkit_mol_objs, labels = _load_clintox_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
elif self.dataset == 'hiv':
smiles_list, rdkit_mol_objs, labels = _load_hiv_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
elif self.dataset == 'tox21':
smiles_list, rdkit_mol_objs, labels = _load_tox21_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
elif self.dataset == 'esol':
smiles_list, rdkit_mol_objs, labels = _load_esol_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
elif self.dataset == 'freesolv':
smiles_list, rdkit_mol_objs, labels = _load_freesolv_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
elif self.dataset == 'lipophilicity':
smiles_list, rdkit_mol_objs, labels = _load_lipophilicity_dataset(self.raw_paths[0])
data_list = self.batch_import_from_rdkit(rdkit_mol_objs, ids=None, labels=labels)
# elif self.dataset == 'pcba':
# smiles_list, rdkit_mol_objs, labels = \
# _load_pcba_dataset(self.raw_paths[0])
# for i in range(len(smiles_list)):
# print(i)
# rdkit_mol = rdkit_mol_objs[i]
# # # convert aromatic bonds to double bonds
# # Chem.SanitizeMol(rdkit_mol,
# # sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
# data = mol_to_graph_data_obj_simple(rdkit_mol)
# # manually add mol id
# data.id = torch.tensor(
# [i]) # id here is the index of the mol in
# # the dataset
# data.y = torch.tensor(labels[i, :])
# data_list.append(data)
# data_smiles_list.append(smiles_list[i])
# elif self.dataset == 'pcba_pretrain':
# smiles_list, rdkit_mol_objs, labels = \
# _load_pcba_dataset(self.raw_paths[0])
# downstream_inchi = set(pd.read_csv(os.path.join(self.root,
# 'downstream_mol_inchi_may_24_2019'),
# sep=',', header=None)[0])
# for i in range(len(smiles_list)):
# print(i)
# if '.' not in smiles_list[i]: # remove examples with
# # multiples species
# rdkit_mol = rdkit_mol_objs[i]
# mw = Descriptors.MolWt(rdkit_mol)
# if 50 <= mw <= 900:
# inchi = create_standardized_mol_id(smiles_list[i])
# if inchi != None and inchi not in downstream_inchi:
# # # convert aromatic bonds to double bonds
# # Chem.SanitizeMol(rdkit_mol,
# # sanitizeOps=Chem.SanitizeFlags.SANITIZE_KEKULIZE)
# data = mol_to_graph_data_obj_simple(rdkit_mol)
# # manually add mol id
# data.id = torch.tensor(
# [i]) # id here is the index of the mol in
# # the dataset
# data.y = torch.tensor(labels[i, :])
# data_list.append(data)
# data_smiles_list.append(smiles_list[i])
else:
raise ValueError('Invalid dataset name')
if self.pre_filter is not None:
data_list = [d for d in data_list if self.pre_filter(d)]
if self.pre_transform is not None:
data_list = [self.pre_transform(d) for d in data_list]
# write data_smiles_list in processed paths
data_smiles_series = pd.Series(smiles_list)
data_smiles_series.to_csv(os.path.join(self.processed_dir, 'smiles.csv'),
index=False,
header=False)
collated_data, slices = self.collate(data_list)
torch.save((collated_data, slices), self.processed_paths[0])
# def merge_dataset_objs(dataset_1, dataset_2):
# """
# Naively merge 2 molecule dataset objects, and ignore identities of
# molecules. Assumes both datasets have multiple y labels, and will pad
# accordingly. ie if dataset_1 has obj_1 with y dim 1310 and dataset_2 has
# obj_2 with y dim 128, then the resulting obj_1 and obj_2 will have dim
# 1438, where obj_1 have the last 128 cols with 0, and obj_2 have
# the first 1310 cols with 0.
# :return: pytorch geometric dataset obj, with the x, edge_attr, edge_index,
# new y attributes only
# """
# d_1_y_dim = dataset_1[0].y.size()[0]
# d_2_y_dim = dataset_2[0].y.size()[0]
# data_list = []
# # keep only x, edge_attr, edge_index, padded_y then append
# for d in dataset_1:
# old_y = d.y
# new_y = torch.cat([old_y, torch.zeros(d_2_y_dim, dtype=torch.long)])
# data_list.append(Data(x=d.x, edge_index=d.edge_index,
# edge_attr=d.edge_attr, y=new_y))
# for d in dataset_2:
# old_y = d.y
# new_y = torch.cat([torch.zeros(d_1_y_dim, dtype=torch.long), old_y.long()])
# data_list.append(Data(x=d.x, edge_index=d.edge_index,
# edge_attr=d.edge_attr, y=new_y))
# # create 'empty' dataset obj. Just randomly pick a dataset and root path
# # that has already been processed
# new_dataset = MoleculeDataset(root='dataset/chembl_with_labels',
# dataset='chembl_with_labels', empty=True)
# # collate manually
# new_dataset.data, new_dataset.slices = new_dataset.collate(data_list)
# return new_dataset
# %% Data loading functions
def _load_zinc_dataset(input_path):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, None, list of mol ids
"""
input_df = pd.read_csv(input_path, sep=',', compression='gzip', dtype='str')
smiles_list = list(input_df['smiles'])
zinc_id_list = list(input_df['zinc_id'])
labels = None
valid_smiles_list = []
rdkit_mol_objs_list = []
ids = []
for id, smi in zip(zinc_id_list, smiles_list):
if len(ids) % 10000 == 0:
print("\tLoaded %d molecules" % len(ids))
try:
rdkit_mol = AllChem.MolFromSmiles(smi)
if not rdkit_mol is None: # ignore invalid mol objects
id = int(id.split('ZINC')[1].lstrip('0'))
rdkit_mol_objs_list.append(rdkit_mol)
valid_smiles_list.append(smi)
ids.append(id)
except:
continue
return valid_smiles_list, rdkit_mol_objs_list, labels, ids
def _load_chembl_with_labels_dataset(raw_folder):
"""
Data from 'Large-scale comparison of machine learning methods for drug target prediction on ChEMBL'
:param raw_path: path to the folder containing the reduced chembl dataset
:return: list of smiles, list of preprocessed rdkit mol obj, np.array
containing the labels, np.array of fold indices
"""
# 1. load folds and labels
with open(os.path.join(raw_folder, 'labelsHard.pckl'), 'rb') as f:
targetMat = pickle.load(f)
sampleAnnInd = pickle.load(f)
assert list(sampleAnnInd) == list(range(len(sampleAnnInd)))
targetMat = targetMat.copy().tocsr()
targetMat.sort_indices()
with open(os.path.join(raw_folder, 'folds0.pckl'), 'rb') as f:
folds = pickle.load(f)
fold_idx = -np.ones((len(sampleAnnInd),))
for i, fold in enumerate(folds):
fold_idx[np.array(fold)] = i
assert fold_idx.min() >= 0
denseLabels = targetMat.A # possible values are {-1, 0, 1}
# 2. load structures
with open(os.path.join(raw_folder, 'chembl20LSTM.pckl'), 'rb') as f:
rdkitArr = pickle.load(f)
assert len(rdkitArr) == denseLabels.shape[0]
valid_mol_idx = []
mol_list = []
smiles = []
for i, mol in enumerate(rdkitArr):
if len(valid_mol_idx) % 10000 == 0:
print("\tLoaded %d molecules" % len(valid_mol_idx))
if not mol is None:
mol_species_list = split_rdkit_mol_obj(mol)
if len(mol_species_list) > 0:
largest_mol = get_largest_mol(mol_species_list)
if mol.GetNumAtoms() > 2:
valid_mol_idx.append(i)
mol_list.append(largest_mol)
smiles.append(AllChem.MolToSmiles(largest_mol))
denseLabels = denseLabels[np.array(valid_mol_idx)]
fold_idx = fold_idx[np.array(valid_mol_idx)]
return smiles, mol_list, denseLabels, fold_idx
def _load_bbbp_dataset(input_path, remove_invalid_mols=True):
"""
:param input_path:
:param remove_invalid_mols:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['p_np']
# convert 0 to -1
labels = labels.replace(0, -1)
labels = labels.values.reshape((labels.shape[0], -1)) # Make sure label is 2D
# there are no nans
# Mask invalid molecules
invalid_mol_idx = [i for i, mol in enumerate(rdkit_mol_objs_list) if mol is None]
smiles_list = [smi if not i in invalid_mol_idx else None for i, smi in enumerate(smiles_list)]
if remove_invalid_mols:
valid_mol_idx = sorted(set(range(len(smiles_list))) - set(invalid_mol_idx))
smiles_list = [smiles_list[i] for i in valid_mol_idx]
rdkit_mol_objs_list = [rdkit_mol_objs_list[i] for i in valid_mol_idx]
labels = labels[np.array(valid_mol_idx)]
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_clintox_dataset(input_path, remove_invalid_mols=True):
"""
:param input_path:
:param remove_invalid_mols:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
tasks = ['FDA_APPROVED', 'CT_TOX']
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
labels = labels.values.reshape((labels.shape[0], -1))
# there are no nans
# Mask invalid molecules
invalid_mol_idx = [i for i, mol in enumerate(rdkit_mol_objs_list) if mol is None]
smiles_list = [smi if not i in invalid_mol_idx else None for i, smi in enumerate(smiles_list)]
if remove_invalid_mols:
valid_mol_idx = sorted(set(range(len(smiles_list))) - set(invalid_mol_idx))
smiles_list = [smiles_list[i] for i in valid_mol_idx]
rdkit_mol_objs_list = [rdkit_mol_objs_list[i] for i in valid_mol_idx]
labels = labels[np.array(valid_mol_idx)]
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_hiv_dataset(input_path, remove_invalid_mols=False):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['HIV_active']
# convert 0 to -1
labels = labels.replace(0, -1)
labels = labels.values.reshape((labels.shape[0], -1))
# there are no nans
# No invalid molecules in esol
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_tox21_dataset(input_path, remove_invalid_mols=False):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels
"""
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
tasks = ['NR-AR', 'NR-AR-LBD', 'NR-AhR', 'NR-Aromatase', 'NR-ER', 'NR-ER-LBD',
'NR-PPAR-gamma', 'SR-ARE', 'SR-ATAD5', 'SR-HSE', 'SR-MMP', 'SR-p53']
labels = input_df[tasks]
# convert 0 to -1
labels = labels.replace(0, -1)
# convert nan to 0
labels = labels.fillna(0)
labels = labels.values.reshape((labels.shape[0], -1))
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_esol_dataset(input_path, remove_invalid_mols=False):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels (regression task)
"""
# NB: some examples have multiple species
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['measured log solubility in mols per litre']
labels = labels.values.reshape((labels.shape[0], -1))
# No invalid molecules in esol
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_freesolv_dataset(input_path, remove_invalid_mols=False):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels (regression task)
"""
input_df = pd.read_csv(input_path, sep=',')
smiles_list = input_df['smiles']
rdkit_mol_objs_list = [AllChem.MolFromSmiles(s) for s in smiles_list]
labels = input_df['expt']
labels = labels.values.reshape((labels.shape[0], -1))
assert len(smiles_list) == len(rdkit_mol_objs_list)
assert len(smiles_list) == len(labels)
return smiles_list, rdkit_mol_objs_list, labels
def _load_lipophilicity_dataset(input_path, remove_invalid_mols=False):
"""
:param input_path:
:return: list of smiles, list of rdkit mol obj, np.array containing the
labels (regression task)
"""
input_df = | pd.read_csv(input_path, sep=',') | pandas.read_csv |
import os
import yaml
import json
import pandas as pd
import matplotlib.pyplot as plt
from pylab import rcParams
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
import glob
import time
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertYaml2PandasDataframeT20
# This function converts yaml files to Pandas dataframe and saves as CSV
#
###########################################################################################
def convertYaml2PandasDataframeT20(infile,source,dest):
'''
Converts and save T20 yaml files to pandasdataframes
Description
This function coverts all T20 Yaml files from source directory to pandas ata frames.
The data frames are then stored as .csv files The saved file is of the format
team1-team2-date.csv For e.g. Kolkata Knight Riders-Sunrisers Hyderabad-2016-05-22.csv etc
Usage
convertYaml2PandasDataframeT20(yamlFile,sourceDir=".",targetDir=".")
Arguments
yamlFile
The yaml file to be converted to dataframe and saved
sourceDir
The source directory of the yaml file
targetDir
The target directory in which the data frame is stored as RData file
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframeT20
Examples
# In the example below ../yamldir c
convertYaml2PandasDataframeT20("225171.yaml",".","../data")
'''
os.chdir(source)
os.path.join(source,infile)
# Read Yaml file and convert to json
print('Converting file:',infile)
with open(infile) as f:
a=yaml.load(f)
# 1st innings
deliveries=a['innings'][0]['1st innings']['deliveries']
#Create empty dataframe for team1
team1=pd.DataFrame()
# Loop through all the deliveries of 1st innings and append each row to dataframe
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team1=pd.concat([team1,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team1=team1.rename(columns={'batsman':'striker'})
# All extras column names
extras=[0,'wides','byes','legbyes','noballs','penalty']
if 'extras' in team1: #Check if extras are there
# Get the columns in extras for team1
b=team1.extras.apply(pd.Series).columns
# Find the missing extras columns
diff= list(set(extras) - set(b))
print('Team1:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team1=team1.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team1=pd.concat([team1,team1['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team1:",col)
team1[col]=0
team1=team1.drop(columns=0)
else:
print('Team1:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team1: #Check if runs in team1
team1=team1.rename(columns={'runs':'runs_dict'})
team1=pd.concat([team1,team1['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team1:Runs not present')
if 'wicket' in team1: #Check if wicket present
# Rename wicket as wicket_dict dict column as there is another wicket column
team1=team1.rename(columns={'wicket':'wicket_dict'})
team1=pd.concat([team1,team1['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team1: Wicket not present')
team1['team']=a['innings'][0]['1st innings']['team']
team1=team1.reset_index(inplace=False)
#Rename index to delivery
team1=team1.rename(columns={'index':'delivery'})
# 2nd innings - Check if the 2nd inning was played
if len(a['innings']) > 1: # Team2 played
deliveries=a['innings'][1]['2nd innings']['deliveries']
#Create empty dataframe for team1
team2=pd.DataFrame()
# Loop through all the deliveries of 1st innings
for i in range(len(deliveries)):
df = pd.DataFrame(deliveries[i])
b= df.T
team2=pd.concat([team2,b])
# Rename batsman to striker/non-striker as there is another column batsman who scored runs
team2=team2.rename(columns={'batsman':'striker'})
# Get the columns in extras for team1
if 'extras' in team2: #Check if extras in team2
b=team2.extras.apply(pd.Series).columns
diff= list(set(extras) - set(b))
print('Team2:diff:',diff)
# Rename extras dict column as there is another column extras which comes from runs_dict
team2=team2.rename(columns={'extras':'extras_dict'})
#Create new columns by splitting dictionary columns - extras and runs
team2=pd.concat([team2,team2['extras_dict'].apply(pd.Series)], axis=1)
# Add the missing columns
for col in diff:
print("team2:",col)
team2[col]=0
team2=team2.drop(columns=0)
else:
print('Team2:Extras not present')
# Rename runs columns to runs_dict
if 'runs' in team2:
team2=team2.rename(columns={'runs':'runs_dict'})
team2=pd.concat([team2,team2['runs_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:Runs not present')
if 'wicket' in team2:
# Rename wicket as wicket_dict column as there is another column wicket
team2=team2.rename(columns={'wicket':'wicket_dict'})
team2=pd.concat([team2,team2['wicket_dict'].apply(pd.Series)], axis=1)
else:
print('Team2:wicket not present')
team2['team']=a['innings'][1]['2nd innings']['team']
team2=team2.reset_index(inplace=False)
#Rename index to delivery
team2=team2.rename(columns={'index':'delivery'})
else: # Create empty columns for team2 so that the complete DF as all columns
team2 = pd.DataFrame()
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
team2 = team2.reindex(columns=cols)
#Check for missing columns. It is possible that no wickets for lost in the entire innings
cols=['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']
# Team1 - missing columns
msngCols=list(set(cols) - set(team1.columns))
print('Team1-missing columns:', msngCols)
for col in msngCols:
print("Adding:team1:",col)
team1[col]=0
# Team2 - missing columns
msngCols=list(set(cols) - set(team2.columns))
print('Team2-missing columns:', msngCols)
for col in msngCols:
print("Adding:team2:",col)
team2[col]=0
# Now both team1 and team2 should have the same columns. Concatenate
team1=team1[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
team2=team2[['delivery', 'striker', 'bowler', 'extras_dict', 'non_striker',\
'runs_dict', 'wicket_dict', 'wides', 'noballs', 'legbyes', 'byes', 'penalty',\
'kind','player_out','fielders',\
'batsman', 'extras', 'total', 'team']]
df=pd.concat([team1,team2])
#Fill NA's with 0s
df=df.fillna(0)
# Fill in INFO
print("Length of info field=",len(a['info']))
#City
try:
df['city']=a['info']['city']
except:
df['city'] =0
#Date
df['date']=a['info']['dates'][0]
#Gender
df['gender']=a['info']['gender']
#Match type
df['match_type']=a['info']['match_type']
# Neutral venue
try:
df['neutral_venue'] = a['info']['neutral_venue']
except KeyError as error:
df['neutral_venue'] = 0
#Outcome - Winner
try:
df['winner']=a['info']['outcome']['winner']
# Get the win type - runs, wickets etc
df['winType']=list(a['info']['outcome']['by'].keys())[0]
print("Wintype=",list(a['info']['outcome']['by'].keys())[0])
#Get the value of wintype
winType=list(a['info']['outcome']['by'].keys())[0]
print("Win value=",list(a['info']['outcome']['by'].keys())[0] )
# Get the win margin - runs,wickets etc
df['winMargin']=a['info']['outcome']['by'][winType]
print("win margin=", a['info']['outcome']['by'][winType])
except:
df['winner']=0
df['winType']=0
df['winMargin']=0
# Outcome - Tie
try:
df['result']=a['info']['outcome']['result']
df['resultHow']=list(a['info']['outcome'].keys())[0]
df['resultTeam'] = a['info']['outcome']['eliminator']
print(a['info']['outcome']['result'])
print(list(a['info']['outcome'].keys())[0])
print(a['info']['outcome']['eliminator'])
except:
df['result']=0
df['resultHow']=0
df['resultTeam']=0
try:
df['non_boundary'] = a['info']['non_boundary']
except KeyError as error:
df['non_boundary'] = 0
try:
df['ManOfMatch']=a['info']['player_of_match'][0]
except:
df['ManOfMatch']=0
# Identify the winner
df['overs']=a['info']['overs']
df['team1']=a['info']['teams'][0]
df['team2']=a['info']['teams'][1]
df['tossWinner']=a['info']['toss']['winner']
df['tossDecision']=a['info']['toss']['decision']
df['venue']=a['info']['venue']
# Rename column 'striker' to batsman
# Rename column 'batsman' to runs as it signifies runs scored by batsman
df=df.rename(columns={'batsman':'runs'})
df=df.rename(columns={'striker':'batsman'})
if (type(a['info']['dates'][0]) == str):
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0] + '.csv'
else:
outfile=a['info']['teams'][0]+ '-' + a['info']['teams'][1] + '-' +a['info']['dates'][0].strftime('%Y-%m-%d') + '.csv'
destFile=os.path.join(dest,outfile)
print(destFile)
df.to_csv(destFile,index=False)
print("Dataframe shape=",df.shape)
return df, outfile
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: convertAllYaml2PandasDataframesT20
# This function converts all yaml files to Pandas dataframes and saves as CSV
#
###########################################################################################
def convertAllYaml2PandasDataframesT20(source,dest):
'''
Convert and save all Yaml files to pandas dataframes and save as CSV
Description
This function coverts all Yaml files from source directory to data frames. The data frames are
then stored as .csv. The saved files are of the format team1-team2-date.RData For
e.g. England-India-2008-04-06.RData etc
Usage
convertAllYaml2PandasDataframesT20(sourceDir=".",targetDir=".")
Arguments
sourceDir
The source directory of the yaml files
targetDir
The target directory in which the data frames are stored as RData files
Value
None
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
convertYaml2PandasDataframe
Examples
# In the example below ../yamldir is the source dir for the yaml files
convertAllYaml2PandasDataframesT20("../yamldir","../data")
'''
files = os.listdir(source)
for index, file in enumerate(files):
print("\n\nFile no=",index)
if file.endswith(".yaml"):
df, filename = convertYaml2PandasDataframeT20(file, source, dest)
#print(filename)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getRuns
# This function gets the runs scored by batsmen
#
###########################################################################################
def getRuns(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Determine number of deliveries faced and runs scored
runs=df1[['batsman','runs']].groupby(['batsman'],sort=False,as_index=False).agg(['count','sum'])
# Drop level 0
runs.columns = runs.columns.droplevel(0)
runs=runs.reset_index(inplace=False)
runs.columns=['batsman','balls','runs']
return(runs)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getFours
# This function gets the fours scored by batsmen
#
###########################################################################################
def getFours(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
# Get number of 4s. Check if it is boundary (non_boundary=0)
m=df1.loc[(df1.runs >=4) & (df1.runs <6) & (df1.non_boundary==0)]
# Count the number of 4s
noFours= m[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
noFours.columns=['batsman','4s']
return(noFours)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getSixes
# This function gets the sixes scored by batsmen
#
###########################################################################################
def getSixes(df):
df1=df[['batsman','runs','extras','total','non_boundary']]
df2= df1.loc[(df1.runs ==6)]
sixes= df2[['batsman','runs']].groupby('batsman',sort=False,as_index=False).count()
sixes.columns=['batsman','6s']
return(sixes)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: getExtras
# This function gets the extras for the team
#
###########################################################################################
def getExtras(df):
df3= df[['total','wides', 'noballs', 'legbyes', 'byes', 'penalty', 'extras']]
a=df3.sum().astype(int)
#Convert series to dataframe
extras=a.to_frame().T
return(extras)
##########################################################################################
# Designed and developed by <NAME>
# Date : 27 Dec 2018
# Function: teamBattingScorecardMatch
# This function returns the team batting scorecard
#
###########################################################################################
def teamBattingScorecardMatch (match,theTeam):
'''
Team batting scorecard of a team in a match
Description
This function computes returns the batting scorecard (runs, fours, sixes, balls played) for the team
Usage
teamBattingScorecardMatch(match,theTeam)
Arguments
match
The match for which the score card is required e.g.
theTeam
Team for which scorecard required
Value
scorecard A data frame with the batting scorecard
Note
Maintainer: <NAME> <EMAIL>
Author(s)
<NAME>
References
http://cricsheet.org/
https://gigadom.in/
See Also
teamBatsmenPartnershipMatch
teamBowlingScorecardMatch
teamBatsmenVsBowlersMatch
Examples
x1,y1=teamBattingScorecardMatch(kkr_sh,"<NAME>")
print(x1)
print(y1)
'''
scorecard=pd.DataFrame()
if(match.size != 0):
team=match.loc[match['team'] == theTeam]
else:
return(scorecard,-1)
a1= getRuns(team)
b1= getFours(team)
c1= getSixes(team)
# Merge columns
d1= | pd.merge(a1, b1, how='outer', on='batsman') | pandas.merge |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
def test_construction_from_string(self):
result = DatetimeTZDtype.construct_from_string(
'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, result)
pytest.raises(TypeError,
lambda: DatetimeTZDtype.construct_from_string('foo'))
def test_construct_from_string_raises(self):
with pytest.raises(TypeError, match="notatz"):
DatetimeTZDtype.construct_from_string('datetime64[ns, notatz]')
with pytest.raises(TypeError,
match="^Could not construct DatetimeTZDtype$"):
DatetimeTZDtype.construct_from_string(['datetime64[ns, notatz]'])
def test_is_dtype(self):
assert not DatetimeTZDtype.is_dtype(None)
assert DatetimeTZDtype.is_dtype(self.dtype)
assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
assert not DatetimeTZDtype.is_dtype('foo')
assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
assert not DatetimeTZDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
assert not is_dtype_equal(self.dtype, 'foo')
assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
DatetimeTZDtype('ns', 'US/Pacific'))
# numpy compat
assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
def test_basic(self):
assert is_datetime64tz_dtype(self.dtype)
dr = date_range('20130101', periods=3, tz='US/Eastern')
s = Series(dr, name='A')
# dtypes
assert is_datetime64tz_dtype(s.dtype)
assert is_datetime64tz_dtype(s)
assert not is_datetime64tz_dtype(np.dtype('float64'))
assert not is_datetime64tz_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s)
assert is_datetimetz(s.dtype)
assert not is_datetimetz(np.dtype('float64'))
assert not is_datetimetz(1.0)
def test_dst(self):
dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
s1 = Series(dr1, name='A')
assert is_datetime64tz_dtype(s1)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s1)
dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
s2 = Series(dr2, name='A')
assert is_datetime64tz_dtype(s2)
with tm.assert_produces_warning(FutureWarning):
assert is_datetimetz(s2)
assert s1.dtype == s2.dtype
@pytest.mark.parametrize('tz', ['UTC', 'US/Eastern'])
@pytest.mark.parametrize('constructor', ['M8', 'datetime64'])
def test_parser(self, tz, constructor):
# pr #11245
dtz_str = '{con}[ns, {tz}]'.format(con=constructor, tz=tz)
result = DatetimeTZDtype.construct_from_string(dtz_str)
expected = DatetimeTZDtype('ns', tz)
assert result == expected
def test_empty(self):
with pytest.raises(TypeError, match="A 'tz' is required."):
DatetimeTZDtype()
class TestPeriodDtype(Base):
def create(self):
return PeriodDtype('D')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = PeriodDtype('D')
dtype3 = PeriodDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
def test_construction(self):
with pytest.raises(ValueError):
PeriodDtype('xx')
for s in ['period[D]', 'Period[D]', 'D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day()
assert is_period_dtype(dt)
for s in ['period[3D]', 'Period[3D]', '3D']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Day(3)
assert is_period_dtype(dt)
for s in ['period[26H]', 'Period[26H]', '26H',
'period[1D2H]', 'Period[1D2H]', '1D2H']:
dt = PeriodDtype(s)
assert dt.freq == pd.tseries.offsets.Hour(26)
assert is_period_dtype(dt)
def test_subclass(self):
a = PeriodDtype('period[D]')
b = PeriodDtype('period[3D]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_identity(self):
assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
def test_compat(self):
assert not is_datetime64_ns_dtype(self.dtype)
assert not is_datetime64_ns_dtype('period[D]')
assert not is_datetime64_dtype(self.dtype)
assert not is_datetime64_dtype('period[D]')
def test_construction_from_string(self):
result = PeriodDtype('period[D]')
assert is_dtype_equal(self.dtype, result)
result = PeriodDtype.construct_from_string('period[D]')
assert is_dtype_equal(self.dtype, result)
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('period[foo]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('foo[D]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns]')
with pytest.raises(TypeError):
PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
def test_is_dtype(self):
assert PeriodDtype.is_dtype(self.dtype)
assert PeriodDtype.is_dtype('period[D]')
assert PeriodDtype.is_dtype('period[3D]')
assert PeriodDtype.is_dtype(PeriodDtype('3D'))
assert PeriodDtype.is_dtype('period[U]')
assert PeriodDtype.is_dtype('period[S]')
assert PeriodDtype.is_dtype(PeriodDtype('U'))
assert PeriodDtype.is_dtype(PeriodDtype('S'))
assert not PeriodDtype.is_dtype('D')
assert not PeriodDtype.is_dtype('3D')
assert not PeriodDtype.is_dtype('U')
assert not PeriodDtype.is_dtype('S')
assert not PeriodDtype.is_dtype('foo')
assert not PeriodDtype.is_dtype(np.object_)
assert not PeriodDtype.is_dtype(np.int64)
assert not PeriodDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'period[D]')
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(self.dtype, PeriodDtype('D'))
assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
assert not is_dtype_equal(self.dtype, 'D')
assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
def test_basic(self):
assert is_period_dtype(self.dtype)
pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
assert is_period_dtype(pidx.dtype)
assert is_period_dtype(pidx)
with tm.assert_produces_warning(FutureWarning):
assert is_period(pidx)
s = Series(pidx, name='A')
assert is_period_dtype(s.dtype)
assert is_period_dtype(s)
with tm.assert_produces_warning(FutureWarning):
assert is_period(s)
assert not is_period_dtype(np.dtype('float64'))
assert not is_period_dtype(1.0)
with tm.assert_produces_warning(FutureWarning):
assert not is_period(np.dtype('float64'))
with tm.assert_produces_warning(FutureWarning):
assert not is_period(1.0)
def test_empty(self):
dt = PeriodDtype()
with pytest.raises(AttributeError):
str(dt)
def test_not_string(self):
# though PeriodDtype has object kind, it cannot be string
assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
def create(self):
return IntervalDtype('int64')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = IntervalDtype('int64')
dtype3 = IntervalDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert dtype is dtype2
assert dtype2 is dtype3
assert dtype3 is dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype1 = IntervalDtype('interval')
dtype2 = IntervalDtype(dtype1)
dtype3 = IntervalDtype('interval')
assert dtype2 == dtype1
assert dtype2 == dtype2
assert dtype2 == dtype3
assert dtype2 is dtype1
assert dtype2 is dtype2
assert dtype2 is dtype3
assert hash(dtype2) == hash(dtype1)
assert hash(dtype2) == hash(dtype2)
assert hash(dtype2) == hash(dtype3)
@pytest.mark.parametrize('subtype', [
'interval[int64]', 'Interval[int64]', 'int64', np.dtype('int64')])
def test_construction(self, subtype):
i = IntervalDtype(subtype)
assert i.subtype == np.dtype('int64')
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [None, 'interval', 'Interval'])
def test_construction_generic(self, subtype):
# generic
i = IntervalDtype(subtype)
assert i.subtype is None
assert is_interval_dtype(i)
@pytest.mark.parametrize('subtype', [
CategoricalDtype(list('abc'), False),
CategoricalDtype(list('wxyz'), True),
object, str, '<U10', 'interval[category]', 'interval[object]'])
def test_construction_not_supported(self, subtype):
# GH 19016
msg = ('category, object, and string subtypes are not supported '
'for IntervalDtype')
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
@pytest.mark.parametrize('subtype', ['xx', 'IntervalA', 'Interval[foo]'])
def test_construction_errors(self, subtype):
msg = 'could not construct IntervalDtype'
with pytest.raises(TypeError, match=msg):
IntervalDtype(subtype)
def test_construction_from_string(self):
result = IntervalDtype('interval[int64]')
assert is_dtype_equal(self.dtype, result)
result = IntervalDtype.construct_from_string('interval[int64]')
assert is_dtype_equal(self.dtype, result)
@pytest.mark.parametrize('string', [
0, 3.14, ('a', 'b'), None])
def test_construction_from_string_errors(self, string):
# these are invalid entirely
msg = 'a string needs to be passed, got type'
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
@pytest.mark.parametrize('string', [
'foo', 'foo[int64]', 'IntervalA'])
def test_construction_from_string_error_subtype(self, string):
# this is an invalid subtype
msg = ("Incorrectly formatted string passed to constructor. "
r"Valid formats include Interval or Interval\[dtype\] "
"where dtype is numeric, datetime, or timedelta")
with pytest.raises(TypeError, match=msg):
IntervalDtype.construct_from_string(string)
def test_subclass(self):
a = IntervalDtype('interval[int64]')
b = IntervalDtype('interval[int64]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_is_dtype(self):
assert IntervalDtype.is_dtype(self.dtype)
assert IntervalDtype.is_dtype('interval')
assert IntervalDtype.is_dtype(IntervalDtype('float64'))
assert IntervalDtype.is_dtype(IntervalDtype('int64'))
assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
assert not IntervalDtype.is_dtype('D')
assert not IntervalDtype.is_dtype('3D')
assert not IntervalDtype.is_dtype('U')
assert not IntervalDtype.is_dtype('S')
assert not IntervalDtype.is_dtype('foo')
assert not IntervalDtype.is_dtype('IntervalA')
assert not IntervalDtype.is_dtype(np.object_)
assert not IntervalDtype.is_dtype(np.int64)
assert not IntervalDtype.is_dtype(np.float64)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'interval[int64]')
assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
assert not is_dtype_equal(self.dtype, 'int64')
assert not is_dtype_equal(IntervalDtype('int64'),
IntervalDtype('float64'))
# invalid subtype comparisons do not raise when directly compared
dtype1 = IntervalDtype('float64')
dtype2 = IntervalDtype('datetime64[ns, US/Eastern]')
assert dtype1 != dtype2
assert dtype2 != dtype1
@pytest.mark.parametrize('subtype', [
None, 'interval', 'Interval', 'int64', 'uint64', 'float64',
'complex128', 'datetime64', 'timedelta64', PeriodDtype('Q')])
def test_equality_generic(self, subtype):
# GH 18980
dtype = IntervalDtype(subtype)
assert is_dtype_equal(dtype, 'interval')
assert is_dtype_equal(dtype, IntervalDtype())
@pytest.mark.parametrize('subtype', [
'int64', 'uint64', 'float64', 'complex128', 'datetime64',
'timedelta64', PeriodDtype('Q')])
def test_name_repr(self, subtype):
# GH 18980
dtype = | IntervalDtype(subtype) | pandas.core.dtypes.dtypes.IntervalDtype |
import re
from typing import Optional
import warnings
import numpy as np
from pandas.errors import AbstractMethodError
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_hashable,
is_integer,
is_iterator,
is_list_like,
is_number,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCPeriodIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
import pandas.core.common as com
from pandas.io.formats.printing import pprint_thing
from pandas.plotting._matplotlib.compat import _mpl_ge_3_0_0
from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters
from pandas.plotting._matplotlib.style import _get_standard_colors
from pandas.plotting._matplotlib.tools import (
_flatten,
_get_all_lines,
_get_xlim,
_handle_shared_axes,
_subplots,
format_date_labels,
table,
)
class MPLPlot:
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data :
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = "vertical"
_default_rot = 0
orientation: Optional[str] = None
_pop_attributes = [
"label",
"style",
"logy",
"logx",
"loglog",
"mark_right",
"stacked",
]
_attr_defaults = {
"logy": False,
"logx": False,
"loglog": False,
"mark_right": True,
"stacked": False,
}
def __init__(
self,
data,
kind=None,
by=None,
subplots=False,
sharex=None,
sharey=False,
use_index=True,
figsize=None,
grid=None,
legend=True,
rot=None,
ax=None,
fig=None,
title=None,
xlim=None,
ylim=None,
xticks=None,
yticks=None,
sort_columns=False,
fontsize=None,
secondary_y=False,
colormap=None,
table=False,
layout=None,
include_bool=False,
**kwds,
):
import matplotlib.pyplot as plt
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
                # if we are handed an axis, the user is expected to handle
                # the visibility settings themselves
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else plt.rcParams["axes.grid"]
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop("xerr", None)
yerr = kwds.pop("yerr", None)
self.errors = {
kw: self._parse_errorbars(kw, err)
for kw, err in zip(["xerr", "yerr"], [xerr, yerr])
}
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndexClass)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# ugly TypeError if user passes matplotlib's `cmap` name.
# Probably better to accept either.
if "cmap" in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif "cmap" in kwds:
self.colormap = kwds.pop("cmap")
else:
self.colormap = colormap
self.table = table
self.include_bool = include_bool
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
import matplotlib.colors
if (
"color" in self.kwds
and self.nseries == 1
and not is_list_like(self.kwds["color"])
):
# support series.plot(color='green')
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds
and isinstance(self.kwds["color"], tuple)
and self.nseries == 1
and len(self.kwds["color"]) in (3, 4)
):
# support RGB and RGBA tuples in series plot
self.kwds["color"] = [self.kwds["color"]]
if (
"color" in self.kwds or "colors" in self.kwds
) and self.colormap is not None:
warnings.warn(
"'color' and 'colormap' cannot be used simultaneously. Using 'color'"
)
if "color" in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
for char in s:
if char in matplotlib.colors.BASE_COLORS:
raise ValueError(
"Cannot pass 'style' string with a color symbol and "
"'color' keyword argument. Please use one or the other or "
"pass 'style' without a color symbol"
)
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
for col, values in data.items():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, "right_ax"):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, "left_ax"):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
if self.logy is True or self.loglog is True:
new_ax.set_yscale("log")
elif self.logy == "sym" or self.loglog == "sym":
new_ax.set_yscale("symlog")
return new_ax
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(
naxes=self.nseries,
sharex=self.sharex,
sharey=self.sharey,
figsize=self.figsize,
ax=self.ax,
layout=self.layout,
layout_type=self._layout_type,
)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
valid_log = {False, True, "sym", None}
input_log = {self.logx, self.logy, self.loglog}
if input_log - valid_log:
invalid_log = next(iter((input_log - valid_log)))
raise ValueError(
f"Boolean, None and 'sym' are valid options, '{invalid_log}' is given."
)
if self.logx is True or self.loglog is True:
[a.set_xscale("log") for a in axes]
elif self.logx == "sym" or self.loglog == "sym":
[a.set_xscale("symlog") for a in axes]
if self.logy is True or self.loglog is True:
[a.set_yscale("log") for a in axes]
elif self.logy == "sym" or self.loglog == "sym":
[a.set_yscale("symlog") for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (
is_list_like(self.secondary_y) and len(self.secondary_y) == self.nseries
)
if sec_true or all_sec:
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = "None"
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
include_type = [np.number, "datetime", "datetimetz", "timedelta"]
# GH23719, allow plotting boolean
if self.include_bool is True:
include_type.append(np.bool_)
# GH22799, exclude datatime-like type for boxplot
exclude_type = None
if self._kind == "box":
# TODO: change after solving issue 27881
include_type = [np.number]
exclude_type = ["timedelta"]
# GH 18755, include object and category type for scatter plot
if self._kind == "scatter":
include_type.extend(["object", "category"])
numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type)
try:
is_empty = numeric_data.columns.empty
except AttributeError:
is_empty = not len(numeric_data)
# no non-numeric frames or series allowed
if is_empty:
raise TypeError("no numeric data to plot")
# GH25587: cast ExtensionArray of pandas (IntegerArray, etc.) to
# np.ndarray before plot.
numeric_data = numeric_data.copy()
for col in numeric_data:
numeric_data[col] = np.asarray(numeric_data[col])
self.data = numeric_data
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
if self.orientation == "vertical" or self.orientation is None:
self._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
elif self.orientation == "horizontal":
self._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, "right_ax"):
self._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize)
else: # pragma no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(
axarr=all_axes,
nplots=len(all_axes),
naxes=nrows * ncols,
nrows=nrows,
ncols=ncols,
sharex=self.sharex,
sharey=self.sharey,
)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
raise ValueError(
"The length of `title` must equal the number "
"of columns if using `title` of type `list` "
"and `subplots=True`.\n"
f"length of title = {len(self.title)}\n"
f"number of columns = {self.nseries}"
)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = (
"Using `title` of type `list` is not supported "
"unless `subplots=True` is passed"
)
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
""" Tick creation within matplotlib is reasonably expensive and is
internally deferred until accessed as Ticks are created/destroyed
multiple times per draw. It's therefore beneficial for us to avoid
accessing unless we will act on the Tick.
"""
if rot is not None or fontsize is not None:
# rot=0 is a valid setting, hence the explicit None check
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, ABCMultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = map(pprint_thing, self.data.columns.names)
return ",".join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + " (right)"
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg, handle = self._get_ax_legend_handle(self.axes[0])
handles = []
labels = []
title = ""
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
# Replace leg.LegendHandles because it misses marker info
handles.extend(handle)
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == "reverse":
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc="best", title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc="best")
def _get_ax_legend_handle(self, ax):
"""
Take in axes and return ax, legend and handle under different scenarios
"""
leg = ax.get_legend()
# Get handle from axes
handle, _ = ax.get_legend_handles_labels()
other_ax = getattr(ax, "left_ax", None) or getattr(ax, "right_ax", None)
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg, handle
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ("datetime", "date", "datetime64", "time")
if self.use_index:
if convert_period and isinstance(index, ABCPeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. We take a LBYL approach here: by the time
matplotlib raises an exception for non numeric/datetime
xaxis values, plt has already taken several actions.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = list(range(len(index)))
else:
x = list(range(len(index)))
return x
@classmethod
@register_pandas_matplotlib_converters
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = isna(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if isinstance(x, ABCIndexClass):
x = x._mpl_repr()
if is_errorbar:
if "xerr" in kwds:
kwds["xerr"] = np.array(kwds.get("xerr"))
if "yerr" in kwds:
kwds["yerr"] = np.array(kwds.get("yerr"))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
def _get_index_name(self):
if isinstance(self.data.index, ABCMultiIndex):
name = self.data.index.names
if com.any_not_none(*name):
name = ",".join(pprint_thing(x) for x in name)
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, "left_ax", ax)
else:
return getattr(ax, "right_ax", ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
@classmethod
def get_default_ax(cls, ax):
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
with plt.rc_context():
ax = plt.gca()
ax = cls._get_ax_layer(ax)
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndexClass)):
return self.data.columns[i] in self.secondary_y
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns a tuple of the appropriate style and kwds to which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = "color" in kwds or self.colormap is not None
nocolor_style = style is None or re.match("[a-z]+", style) is None
if (has_color or self.subplots) and nocolor_style:
kwds["color"] = colors[col_num % len(colors)]
return style, kwds
def _get_colors(self, num_colors=None, color_kwds="color"):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(
num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds),
)
def _parse_errorbars(self, label, err):
"""
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
"""
if err is None:
return None
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, ABCDataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, ABCSeries):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, str):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (
(err_shape[0] != self.nseries)
or (err_shape[1] != 2)
or (err_shape[2] != len(self.data))
):
raise ValueError(
"Asymmetrical error bars should be provided "
f"with the shape ({self.nseries}, 2, {len(self.data)})"
)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = f"No valid {label} detected"
raise ValueError(msg)
return err
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
errors = {}
for kw, flag in zip(["xerr", "yerr"], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (ABCDataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_subplots(self):
from matplotlib.axes import Subplot
return [
ax for ax in self.axes[0].get_figure().get_axes() if isinstance(ax, Subplot)
]
def _get_axes_layout(self):
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
"""
Abstract class for plotting on plane, currently scatter and hexbin.
"""
_layout_type = "single"
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + " requires an x and y column")
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
# Scatter plot allows plotting object-dtype data
if self._kind == "hexbin":
if len(self.data[x]._get_numeric_data()) == 0:
raise ValueError(self._kind + " requires x column to be numeric")
if len(self.data[y]._get_numeric_data()) == 0:
raise ValueError(self._kind + " requires y column to be numeric")
self.x = x
self.y = y
@property
def nseries(self):
return 1
def _post_plot_logic(self, ax, data):
x, y = self.x, self.y
ax.set_ylabel( | pprint_thing(y) | pandas.io.formats.printing.pprint_thing |
from re import A
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()
import pandas as pd
import numpy as np
import os, sys
from scripts.constants import *
# https://abseil.io/docs/python/guides/flags
from absl import flags
FLAGS = flags.FLAGS
## Hparams
flags.DEFINE_string("merged_path", None, "merged_path")
flags.DEFINE_list("csv_paths", None, "csv paths")
flags.DEFINE_integer("max_x", None, "max value of x")
flags.DEFINE_integer("window_size", 20, "window size of the plot")
flags.DEFINE_float("last_steps_ratio", 0.80, "for measureing top k")
flags.DEFINE_list("factors", None, "selected single factors")
flags.DEFINE_string("best_variant", None, "best variant of our method if given")
flags.DEFINE_list("other_methods", None, "selective other methods to show if given")
flags.DEFINE_string("name", None, "for plot tabular results")
flags.FLAGS(sys.argv)
print(FLAGS.flags_into_string())
### 1. Preprocess
assert len(FLAGS.csv_paths) == 2
df_D = pd.read_csv(FLAGS.csv_paths[0])
df_R = pd.read_csv(FLAGS.csv_paths[1])
assert len(df_D) == len(df_R)
variant_tag_names = [
variant_tag_name
for variant_tag_name in variant_tag_names
if variant_tag_name in df_D.columns
]
if FLAGS.factors is None:
FLAGS.factors = variant_tag_names
assert set(FLAGS.factors) <= set(variant_tag_names) # subset check
# join (merge) two dfs
df_D[merged_tag] = df_D[variant_tag_names].astype(str).agg("-".join, axis=1)
df_R[merged_tag] = df_R[variant_tag_names].astype(str).agg("-".join, axis=1)
df_D = df_D.reset_index() # add column 'index' to keep env steps order for sorting
df_R = df_R.reset_index()
df_D = df_D.sort_values(by=["instance", "index"], ignore_index=True)
df_R = df_R.sort_values(by=["instance", "index"], ignore_index=True)
# HACK: use concat to join the subtables... so that one seed trial has both D and R results...
df = pd.concat([df_D, df_R[["succ_RR", "succ_RE"]]], axis=1)
# create new tags
for (new_tag, raw_tags) in generalization_tags.items():
df[new_tag] = df[raw_tags].mean(axis=1) # avg over raw tags
os.makedirs(FLAGS.merged_path, exist_ok=True)
# extract the tags
x_tag = x_tag[1]
key_of_interests = list(generalization_tags.keys())
if FLAGS.max_x is not None:
df = df.loc[df[x_tag] <= FLAGS.max_x] # set max_x
# smoothing
for key in key_of_interests:
df[key] = df.groupby([*variant_tag_names, trial_tag])[key].transform(
lambda x: x.rolling(FLAGS.window_size, min_periods=1).mean() # rolling mean
)
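# e.g. with window_size=20 each point becomes the mean of itself and up to 19 preceding
# points of the same (variant, trial) curve; min_periods=1 keeps the first points
# instead of turning them into NaN.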
### 2. plot single factor
# make a square-like plot, show Interpolation and Extrapolation only, so use [1:]
num_plots = len(key_of_interests[1:]) * max(1, len(FLAGS.factors))
cols = int(np.ceil(np.sqrt(num_plots)))
rows = int(np.ceil(num_plots / cols))
# seaborn plot
sns.set(font_scale=2.0)
fig, axes = plt.subplots(rows, cols, figsize=(cols * 7, rows * 4))
axes = (
axes.flatten() if isinstance(axes, np.ndarray) else [axes]
) # make it as a flat list
# use lineplot that has average curve (for same x-value) with 95% confidence interval on y-value
# https://seaborn.pydata.org/generated/seaborn.lineplot.html
# it supports at most 3 independent dims to plot, using hue and style, but we recommend at most 2 dims
# by setting hue and style to the same key
# NOTE: any seaborn function has argument ax to support subplots
df_ours = df.loc[df["method"] == "ours"]
df_others = df.loc[df["method"] != "ours"]
ax_id = 0
for key in key_of_interests[1:]:
for variant_tag_name in FLAGS.factors:
sns.lineplot(
ax=axes[ax_id],
data=df_ours,
x=x_tag,
y=key,
hue=variant_tag_name,
# hue_order=order,
# style=variant_tag,
# style_order=order,
# ci=None, # saves a lot of time by skipping error bars
sort=False,
)
axes[ax_id].legend(framealpha=0.5, loc="upper left")
axes[ax_id].set_title(variant_tag_name)
# if FLAGS.max_x is not None:
# axes[ax_id].set_xlim(0, FLAGS.max_x)
ax_id += 1
# set the rest subplots blank
while ax_id < rows * cols:
axes[ax_id].set_visible(False)
ax_id += 1
plt.tight_layout()
# plt.show()
# plt.close()
plt.savefig(
os.path.join(
FLAGS.merged_path,
f"single_factor-{''.join(FLAGS.factors)}-window{FLAGS.window_size}.png",
),
dpi=200,
bbox_inches="tight",
)
plt.close()
### 3. draw the top curves
def get_run_down(dataframe, key, last_steps_ratio=FLAGS.last_steps_ratio):
dataframe[key + auc_tag] = dataframe.groupby([merged_tag, trial_tag])[
key
].transform(
lambda x: x[int(last_steps_ratio * len(x)) :].mean() # last few timesteps
)
tmp_df = dataframe.groupby([merged_tag, trial_tag]).tail(1) # last timestep
run_down = tmp_df.groupby([merged_tag])[
key + auc_tag
].mean() # avg auc for each instance
run_down_std = tmp_df.groupby([merged_tag])[
key + auc_tag
].std() # NOTE: std over seeds (not env steps!)
run_down_std.name += "_std"
return run_down, run_down_std
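# Illustrative: with last_steps_ratio=0.8, a 100-point curve is scored by the mean of its
# last 20 (already smoothed) values; the returned mean/std per variant are then taken
# across seed trials, not across env steps.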
topk_tags_dict = {}
run_down_df = []
for key in key_of_interests:
# select top k of ours
run_down_ours, run_down_ours_std = get_run_down(df_ours, key)
run_down_ours = run_down_ours.nlargest(run_down_ours.shape[0])
# keep all the others
run_down_others, run_down_others_std = get_run_down(df_others, key)
run_down_sorted = pd.concat([run_down_ours, run_down_others]).sort_values(
ascending=False
)
run_down_std = | pd.concat([run_down_ours_std, run_down_others_std]) | pandas.concat |
# flake8: noqa: F841
import tempfile
from pathlib import Path
from typing import List
from pandas._typing import Scalar, ArrayLike
import pandas as pd
import numpy as np
from pandas.core.window import ExponentialMovingWindow
def test_types_init() -> None:
pd.Series(1)
pd.Series((1, 2, 3))
pd.Series(np.array([1, 2, 3]))
pd.Series(data=[1, 2, 3, 4], name="series")
pd.Series(data=[1, 2, 3, 4], dtype=np.int8)
pd.Series(data={'row1': [1, 2], 'row2': [3, 4]})
pd.Series(data=[1, 2, 3, 4], index=[4, 3, 2, 1], copy=True)
def test_types_any() -> None:
res1: bool = pd.Series([False, False]).any()
res2: bool = pd.Series([False, False]).any(bool_only=False)
res3: bool = pd.Series([np.nan]).any(skipna=False)
def test_types_all() -> None:
res1: bool = pd.Series([False, False]).all()
res2: bool = pd.Series([False, False]).all(bool_only=False)
res3: bool = pd.Series([np.nan]).all(skipna=False)
def test_types_csv() -> None:
s = pd.Series(data=[1, 2, 3])
csv_df: str = s.to_csv()
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name)
s2: pd.DataFrame = pd.read_csv(file.name)
with tempfile.NamedTemporaryFile() as file:
s.to_csv(Path(file.name))
s3: pd.DataFrame = pd.read_csv(Path(file.name))
# This keyword was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
with tempfile.NamedTemporaryFile() as file:
s.to_csv(file.name, errors='replace')
s4: pd.DataFrame = pd.read_csv(file.name)
def test_types_copy() -> None:
s = pd.Series(data=[1, 2, 3, 4])
s2: pd.Series = s.copy()
def test_types_select() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s[0]
s[1:]
def test_types_iloc_iat() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.iat[0]
s2.loc[0]
s2.iat[0]
def test_types_loc_at() -> None:
s = pd.Series(data={'row1': 1, 'row2': 2})
s2 = pd.Series(data=[1, 2])
s.loc['row1']
s.at['row1']
s2.loc[1]
s2.at[1]
def test_types_boolean_indexing() -> None:
s = pd.Series([0, 1, 2])
s[s > 1]
s[s]
def test_types_df_to_df_comparison() -> None:
s = pd.Series(data={'col1': [1, 2]})
s2 = pd.Series(data={'col1': [3, 2]})
res_gt: pd.Series = s > s2
res_ge: pd.Series = s >= s2
res_lt: pd.Series = s < s2
res_le: pd.Series = s <= s2
res_e: pd.Series = s == s2
def test_types_head_tail() -> None:
s = pd.Series([0, 1, 2])
s.head(1)
s.tail(1)
def test_types_sample() -> None:
s = pd.Series([0, 1, 2])
s.sample(frac=0.5)
s.sample(n=1)
def test_types_nlargest_nsmallest() -> None:
s = pd.Series([0, 1, 2])
s.nlargest(1)
s.nlargest(1, 'first')
s.nsmallest(1, 'last')
s.nsmallest(1, 'all')
def test_types_filter() -> None:
s = pd.Series(data=[1, 2, 3, 4], index=['cow', 'coal', 'coalesce', ''])
s.filter(items=['cow'])
s.filter(regex='co.*')
s.filter(like='al')
def test_types_setting() -> None:
s = pd.Series([0, 1, 2])
s[3] = 4
s[s == 1] = 5
s[:] = 3
def test_types_drop() -> None:
s = pd.Series([0, 1, 2])
res: pd.Series = s.drop(0)
res2: pd.Series = s.drop([0, 1])
res3: pd.Series = s.drop(0, axis=0)
res4: None = s.drop([0, 1], inplace=True, errors='raise')
res5: None = s.drop([0, 1], inplace=True, errors='ignore')
def test_types_drop_multilevel() -> None:
index = pd.MultiIndex(levels=[['top', 'bottom'], ['first', 'second', 'third']],
codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
s = pd.Series(data=[1, 2, 3, 4, 5, 6], index=index)
res: pd.Series = s.drop(labels='first', level=1)
def test_types_dropna() -> None:
s = pd.Series([1, np.nan, np.nan])
res: pd.Series = s.dropna()
res2: None = s.dropna(axis=0, inplace=True)
def test_types_fillna() -> None:
s = pd.Series([1, np.nan, np.nan, 3])
res: pd.Series = s.fillna(0)
res2: pd.Series = s.fillna(0, axis='index')
res3: pd.Series = s.fillna(method='backfill', axis=0)
res4: None = s.fillna(method='bfill', inplace=True)
res5: pd.Series = s.fillna(method='pad')
res6: pd.Series = s.fillna(method='ffill', limit=1)
def test_types_sort_index() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_index()
res2: None = s.sort_index(ascending=False, inplace=True)
res3: pd.Series = s.sort_index(kind="mergesort")
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_index_with_key() -> None:
s = pd.Series([1, 2, 3], index=['a', 'B', 'c'])
res: pd.Series = s.sort_index(key=lambda k: k.str.lower())
def test_types_sort_values() -> None:
s = pd.Series([4, 2, 1, 3])
res: pd.Series = s.sort_values(0)
res2: pd.Series = s.sort_values(ascending=False)
res3: None = s.sort_values(inplace=True, kind='quicksort')
res4: pd.Series = s.sort_values(na_position='last')
res5: pd.Series = s.sort_values(ignore_index=True)
# This was added in 1.1.0 https://pandas.pydata.org/docs/whatsnew/v1.1.0.html
def test_types_sort_values_with_key() -> None:
s = pd.Series([1, 2, 3], index=[2, 3, 1])
res: pd.Series = s.sort_values(key=lambda k: -k)
def test_types_shift() -> None:
s = pd.Series([1, 2, 3])
s.shift()
s.shift(axis=0, periods=1)
s.shift(-1, fill_value=0)
def test_types_rank() -> None:
s = pd.Series([1, 1, 2, 5, 6, np.nan, 'milion'])
s.rank()
s.rank(axis=0, na_option='bottom')
s.rank(method="min", pct=True)
s.rank(method="dense", ascending=True)
s.rank(method="first", numeric_only=True)
def test_types_mean() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.mean()
s1: pd.Series = s.mean(axis=0, level=0)
f2: float = s.mean(skipna=False)
f3: float = s.mean(numeric_only=False)
def test_types_median() -> None:
s = pd.Series([1, 2, 3, np.nan])
f1: float = s.median()
s1: pd.Series = s.median(axis=0, level=0)
f2: float = s.median(skipna=False)
f3: float = s.median(numeric_only=False)
def test_types_sum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.sum()
s.sum(axis=0, level=0)
s.sum(skipna=False)
s.sum(numeric_only=False)
s.sum(min_count=4)
def test_types_cumsum() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.cumsum()
s.cumsum(axis=0)
s.cumsum(skipna=False)
def test_types_min() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.min()
s.min(axis=0)
s.min(level=0)
s.min(skipna=False)
def test_types_max() -> None:
s = pd.Series([1, 2, 3, np.nan])
s.max()
s.max(axis=0)
s.max(level=0)
s.max(skipna=False)
def test_types_quantile() -> None:
s = pd.Series([1, 2, 3, 10])
s.quantile([0.25, 0.5])
s.quantile(0.75)
s.quantile()
s.quantile(interpolation='nearest')
def test_types_clip() -> None:
s = pd.Series([-10, 2, 3, 10])
s.clip(lower=0, upper=5)
s.clip(lower=0, upper=5, inplace=True)
def test_types_abs() -> None:
s = pd.Series([-10, 2, 3, 10])
s.abs()
def test_types_var() -> None:
s = pd.Series([-10, 2, 3, 10])
s.var()
s.var(axis=0, ddof=1)
s.var(skipna=True, numeric_only=False)
def test_types_std() -> None:
s = pd.Series([-10, 2, 3, 10])
s.std()
s.std(axis=0, ddof=1)
s.std(skipna=True, numeric_only=False)
def test_types_idxmin() -> None:
s = | pd.Series([-10, 2, 3, 10]) | pandas.Series |
import unittest
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from yitian.datasource import *
from yitian.datasource import preprocess
class Test(unittest.TestCase):
# def test_standardize_date(self):
# data_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['Trade Date', 'price'])
#
# expect_pd = pd.DataFrame([
# ['01/01/2019', 11.11],
# ['01/04/2019', 44.44],
# ['01/03/2019', 33.33],
# ['01/02/2019', 22.22]
# ], columns=['date', 'price'])
#
# assert_frame_equal(expect_pd, preprocess.standardize_date(data_pd))
#
# def test_standardize_date_with_multi_date_column(self):
# data_pd = pd.DataFrame([
# ['2019-01-01 00:00:00', '2019-01-01 00:00:00', 11.11],
# ['2019-01-02 00:00:00', '2019-01-01 00:00:00', 22.22],
# ['2019-01-03 00:00:00', '2019-01-01 00:00:00', 33.33],
# ['2019-01-04 00:00:00', '2019-01-01 00:00:00', 44.44],
# ], columns=['DATE', 'date', 'price'])
#
# with self.assertRaises(ValueError) as context:
# preprocess.standardize_date(data_pd)
#
# assert str(context.exception) == \
# str("Original cols ({cols}) cannot be reconnciled with date options ({option})"\
# .format(cols=data_pd.columns.tolist(), option=RAW_DATE_OPTIONS))
def test_create_ts_pd(self):
data_pd = pd.DataFrame([
['01/01/2019', 11.11],
['01/04/2019', 44.44],
['01/03/2019', 33.33],
['01/02/2019', 22.22]
], columns=['date', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-01-02'), 22.22],
[pd.Timestamp('2019-01-03'), 33.33],
[pd.Timestamp('2019-01-04'), 44.44]
], columns=['date', 'price']).set_index('date')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd))
def test_create_ts_pd_datetime(self):
data_pd = pd.DataFrame([
['2019-01-01 11:11:11', 11.11],
['2019-01-04 04:44:44', 44.44],
['2019-01-03 03:33:33', 33.33],
['2019-01-02 22:22:22', 22.22]
], columns=['datetime', 'price'])
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-01-02 22:22:22'), 22.22],
[pd.Timestamp('2019-01-03 03:33:33'), 33.33],
[pd.Timestamp('2019-01-04 04:44:44'), 44.44]
], columns=['datetime', 'price']).set_index('datetime')
assert_frame_equal(expect_pd, preprocess.create_ts_pd(data_pd, index_col=DATETIME))
def test_add_ymd(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11],
[pd.Timestamp('2019-02-02'), 22.22],
[pd.Timestamp('2019-03-03'), 33.33],
[pd.Timestamp('2019-04-04'), 44.44]
], columns=['date', 'price']).set_index('date')
expect_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01'), 11.11, 2019, 1, 1],
[pd.Timestamp('2019-02-02'), 22.22, 2019, 2, 2],
[pd.Timestamp('2019-03-03'), 33.33, 2019, 3, 3],
[pd.Timestamp('2019-04-04'), 44.44, 2019, 4, 4]
], columns=['date', 'price', 'year', 'month', 'day']).set_index('date')
assert_frame_equal(expect_pd, preprocess.add_ymd(data_pd))
def test_add_ymd_datetime(self):
data_pd = pd.DataFrame([
[pd.Timestamp('2019-01-01 11:11:11'), 11.11],
[pd.Timestamp('2019-02-02 22:22:22'), 22.22],
[pd.Timestamp('2019-03-03 03:33:33'), 33.33],
[ | pd.Timestamp('2019-04-04 04:44:44') | pandas.Timestamp |
# coding=utf-8
#
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis-python
#
# Most of this work is copyright (C) 2013-2018 <NAME>
# (<EMAIL>), but it contains contributions by others. See
# CONTRIBUTING.rst for a full list of people who may hold copyright, and
# consult the git log if you need to determine who owns an individual
# contribution.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
#
# END HEADER
from __future__ import division, print_function, absolute_import
from copy import copy
from collections import Iterable, OrderedDict
import attr
import numpy as np
import pandas
import hypothesis.strategies as st
import hypothesis.extra.numpy as npst
import hypothesis.internal.conjecture.utils as cu
from hypothesis.errors import InvalidArgument
from hypothesis.control import reject
from hypothesis.strategies import check_strategy
from hypothesis.internal.compat import hrange
from hypothesis.internal.coverage import check, check_function
from hypothesis.internal.validation import check_type, try_convert, \
check_valid_size, check_valid_interval
try:
from pandas.api.types import is_categorical_dtype
except ImportError: # pragma: no cover
def is_categorical_dtype(dt):
if isinstance(dt, np.dtype):
return False
return dt == 'category'
if False:
from typing import Any, Union, Sequence, Set # noqa
from hypothesis.searchstrategy.strategies import Ex # noqa
def dtype_for_elements_strategy(s):
return st.shared(
s.map(lambda x: pandas.Series([x]).dtype),
key=('hypothesis.extra.pandas.dtype_for_elements_strategy', s),
)
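# e.g. an integer elements strategy maps to whatever dtype pandas infers for a
# one-element Series built from a draw (typically int64); st.shared keys the result on
# the strategy so every user of the same elements strategy sees one inferred dtype.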
def infer_dtype_if_necessary(dtype, values, elements, draw):
if dtype is None and not values:
return draw(dtype_for_elements_strategy(elements))
return dtype
@check_function
def elements_and_dtype(elements, dtype, source=None):
if source is None:
prefix = ''
else:
prefix = '%s.' % (source,)
if elements is not None:
check_strategy(elements, '%selements' % (prefix,))
else:
with check('dtype is not None'):
if dtype is None:
raise InvalidArgument((
'At least one of %(prefix)selements or %(prefix)sdtype '
'must be provided.') % {'prefix': prefix})
with check('is_categorical_dtype'):
if | is_categorical_dtype(dtype) | pandas.api.types.is_categorical_dtype |
import os
import numpy as np
import pandas as pd
import torch
from skimage import io, img_as_uint
from skimage.morphology import skeletonize_3d
from numbers import Number
from itertools import product
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torch.nn.functional import cross_entropy
from torch.nn.modules.loss import _WeightedLoss
from torchvision import transforms as T
from torchvision.transforms import functional as F
from .rprops import get_hypo_rprops, visualize_regions
def chk_mkdir(*args):
for path in args:
if path is not None and not os.path.exists(path):
os.makedirs(path)
def dpi_to_dpm(dpi):
# small hack, default value for dpi is False
if not dpi:
return False
return dpi/25.4
def dpm_to_dpi(dpm):
if not dpm:
return False
return dpm * 25.4
def to_long_tensor(pic):
# handle numpy array
img = torch.from_numpy(np.array(pic, np.uint8))
# backward compatibility
return img.long()
def joint_to_long_tensor(image, mask):
return to_long_tensor(image), to_long_tensor(mask)
def make_transform(
crop=(256, 256), p_flip=0.5, p_color=0.0, color_jitter_params=(0.1, 0.1, 0.1, 0.1),
p_random_affine=0.0, rotate_range=False, normalize=False, long_mask=False
):
if color_jitter_params is not None:
color_tf = T.ColorJitter(*color_jitter_params)
else:
color_tf = None
if normalize:
tf_normalize = T.Normalize(mean=(0.5, 0.5, 0.5), std=(1, 1, 1))
def joint_transform(image, mask):
# transforming to PIL image
image, mask = F.to_pil_image(image), F.to_pil_image(mask)
# random crop
if crop:
i, j, h, w = T.RandomCrop.get_params(image, crop)
image, mask = F.crop(image, i, j, h, w), F.crop(mask, i, j, h, w)
if np.random.rand() < p_flip:
image, mask = F.hflip(image), F.hflip(mask)
# color transforms || ONLY ON IMAGE
if color_tf is not None:
if np.random.rand() < p_color:
image = color_tf(image)
# random rotation
if rotate_range and not p_random_affine:
if np.random.rand() < 0.5:
angle = rotate_range * (np.random.rand() - 0.5)
image, mask = F.rotate(image, angle), F.rotate(mask, angle)
# random affine
if np.random.rand() < p_random_affine:
affine_params = T.RandomAffine(180).get_params((-90, 90), (1, 1), (2, 2), (-45, 45), crop)
image, mask = F.affine(image, *affine_params), F.affine(mask, *affine_params)
# transforming to tensor
image = F.to_tensor(image)
if not long_mask:
mask = F.to_tensor(mask)
else:
mask = to_long_tensor(mask)
# normalizing image
if normalize:
image = tf_normalize(image)
return image, mask
return joint_transform
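# Usage sketch (path is a placeholder): tf = make_transform(crop=(256, 256), long_mask=True)
# then ReadTrainDataset('data/train', transform=tf) applies it jointly to image and mask,
# so random crops/flips stay aligned between the two.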
def confusion_matrix(prediction, target, n_classes):
"""
prediction, target: torch.Tensor objects
"""
prediction = torch.argmax(prediction, dim=0).long()
target = torch.squeeze(target, dim=0)
conf_mtx = torch.zeros(n_classes, n_classes).long()
for i, j in product(range(n_classes), range(n_classes)):
conf_mtx[i, j] = torch.sum((prediction == j) * (target == i))
return conf_mtx
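# Illustrative shapes: prediction is (n_classes, H, W) per-pixel scores (argmax over dim 0),
# target is (1, H, W); conf_mtx[i, j] counts pixels whose true class is i and predicted
# class is j.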
class SoftDiceLoss(_WeightedLoss):
__constants__ = ['weight', 'reduction']
def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean'):
if weight is None:
weight = torch.tensor(1)
else:
# creating tensor if needed
if not isinstance(weight, torch.Tensor):
weight = torch.tensor(weight)
# normalizing weights
weight /= torch.sum(weight)
super(SoftDiceLoss, self).__init__(weight, size_average, reduce, reduction)
def forward(self, y_pred, y_gt):
"""
Args:
y_pred: torch.Tensor of shape (n_batch, n_classes, image.shape)
y_gt: torch.LongTensor of shape (n_batch, image.shape)
"""
dims = (0, *range(2, len(y_pred.shape)))
y_gt = torch.zeros_like(y_pred).scatter_(1, y_gt[:, None, :], 1)
numerator = 2 * torch.sum(y_pred * y_gt, dim=dims)
denominator = torch.sum(y_pred * y_pred + y_gt * y_gt, dim=dims)
return torch.sum((1 - numerator / denominator)*self.weight)
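# Usage sketch (weights are hypothetical): criterion = SoftDiceLoss(weight=[0.2, 0.3, 0.5]);
# loss = criterion(y_pred, y_gt) with y_pred (n_batch, 3, H, W) class probabilities and
# y_gt a LongTensor of shape (n_batch, H, W); the weights are normalised to sum to 1.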
class LogNLLLoss(_WeightedLoss):
__constants__ = ['weight', 'reduction', 'ignore_index']
def __init__(self, weight=None, size_average=None, reduce=None, reduction='mean',
ignore_index=-100):
super(LogNLLLoss, self).__init__(weight, size_average, reduce, reduction)
self.ignore_index = ignore_index
def forward(self, input, target):
input = torch.log(input)
return cross_entropy(input, target, weight=self.weight, reduction=self.reduction,
ignore_index=self.ignore_index)
class ReadTrainDataset(Dataset):
"""
Structure of the dataset should be:
dataset_path
|--images
|--img001.png
|--img002.png
|--masks
|--img001.png
|--img002.png
"""
def __init__(self, dataset_path, transform=None, one_hot_mask=False, long_mask=True):
self.dataset_path = dataset_path
self.images_path = os.path.join(dataset_path, 'images')
self.masks_path = os.path.join(dataset_path, 'masks')
self.images_list = os.listdir(self.images_path)
self.transform = transform
self.one_hot_mask = one_hot_mask
self.long_mask = long_mask
def __len__(self):
return len(os.listdir(self.images_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
image = io.imread(os.path.join(self.images_path, image_filename))
mask = io.imread(os.path.join(self.masks_path, image_filename))
if len(mask.shape) == 2:
mask = np.expand_dims(mask, axis=2)
if self.transform:
image, mask = self.transform(image, mask)
else:
image = F.to_tensor(image)
if self.long_mask:
mask = to_long_tensor(F.to_pil_image(mask))
else:
mask = F.to_tensor(mask)
if self.one_hot_mask:
assert self.one_hot_mask >= 0, 'one_hot_mask must be nonnegative'
mask = torch.zeros((self.one_hot_mask, mask.shape[1], mask.shape[2])).scatter_(0, mask.long(), 1)
return image, mask, image_filename
class ReadTestDataset(Dataset):
"""
Structure of the dataset should be:
dataset_path
|--images
|--img001.png
|--img002.png
"""
def __init__(self, dataset_path, transform=None):
self.dataset_path = dataset_path
self.images_path = os.path.join(dataset_path, 'images')
self.images_list = os.listdir(self.images_path)
self.transform = transform
def __len__(self):
return len(os.listdir(self.images_path))
def __getitem__(self, idx):
image_filename = self.images_list[idx]
image = io.imread(os.path.join(self.images_path, image_filename))
if self.transform:
image = self.transform(image)
else:
image = F.to_tensor(image)
return image, image_filename
class ModelWrapper:
def __init__(
self, model, results_folder, loss=None, optimizer=None,
scheduler=None, cuda_device=None
):
self.model = model
self.loss = loss
self.optimizer = optimizer
self.scheduler = scheduler
self.results_folder = results_folder
chk_mkdir(self.results_folder)
self.cuda_device = cuda_device
if self.cuda_device:
self.model.to(device=self.cuda_device)
try:
self.loss.to(device=self.cuda_device)
except AttributeError:
pass
def train_model(self, dataset, n_epochs, n_batch=1, verbose=False,
validation_dataset=None, prediction_dataset=None,
save_freq=100):
self.model.train(True)
# logging losses
loss_df = pd.DataFrame(np.zeros(shape=(n_epochs, 2)), columns=['train', 'validate'], index=range(n_epochs))
min_loss = np.inf
total_running_loss = 0
for epoch_idx in range(n_epochs):
epoch_running_loss = 0
for batch_idx, (X_batch, y_batch, name) in enumerate(DataLoader(dataset, batch_size=n_batch, shuffle=True)):
if self.cuda_device:
X_batch = Variable(X_batch.to(device=self.cuda_device))
y_batch = Variable(y_batch.to(device=self.cuda_device))
else:
X_batch, y_batch = Variable(X_batch), Variable(y_batch)
# training
self.optimizer.zero_grad()
y_out = self.model(X_batch)
training_loss = self.loss(y_out, y_batch)
training_loss.backward()
self.optimizer.step()
epoch_running_loss += training_loss.item()
if verbose:
print('(Epoch no. %d, batch no. %d) loss: %f' % (epoch_idx, batch_idx, training_loss.item()))
total_running_loss += epoch_running_loss/(batch_idx + 1)
print('(Epoch no. %d) loss: %f' % (epoch_idx, epoch_running_loss/(batch_idx + 1)))
loss_df.loc[epoch_idx, 'train'] = epoch_running_loss/(batch_idx + 1)
if validation_dataset is not None:
validation_error = self.validate(validation_dataset, n_batch=1)
loss_df.loc[epoch_idx, 'validate'] = validation_error
if validation_error < min_loss:
torch.save(self.model.state_dict(), os.path.join(self.results_folder, 'model'))
print('Validation loss improved from %f to %f, model saved to %s'
% (min_loss, validation_error, self.results_folder))
min_loss = validation_error
if self.scheduler is not None:
self.scheduler.step(validation_error)
else:
if epoch_running_loss/(batch_idx + 1) < min_loss:
torch.save(self.model.state_dict(), os.path.join(self.results_folder, 'model'))
print('Training loss improved from %f to %f, model saved to %s'
% (min_loss, epoch_running_loss / (batch_idx + 1), self.results_folder))
min_loss = epoch_running_loss / (batch_idx + 1)
if self.scheduler is not None:
self.scheduler.step(epoch_running_loss / (batch_idx + 1))
# saving model and logs
loss_df.to_csv(os.path.join(self.results_folder, 'loss.csv'))
if epoch_idx % save_freq == 0:
epoch_save_path = os.path.join(self.results_folder, '%d' % epoch_idx)
chk_mkdir(epoch_save_path)
torch.save(self.model.state_dict(), os.path.join(epoch_save_path, 'model'))
if prediction_dataset:
self.predict_large_images(prediction_dataset, epoch_save_path)
self.model.train(False)
del X_batch, y_batch
return total_running_loss/n_batch
def validate(self, dataset, n_batch=1):
self.model.train(False)
total_running_loss = 0
for batch_idx, (X_batch, y_batch, name) in enumerate(DataLoader(dataset, batch_size=n_batch, shuffle=False)):
if self.cuda_device:
X_batch = Variable(X_batch.to(device=self.cuda_device))
y_batch = Variable(y_batch.to(device=self.cuda_device))
else:
X_batch, y_batch = Variable(X_batch), Variable(y_batch)
y_out = self.model(X_batch)
training_loss = self.loss(y_out, y_batch)
total_running_loss += training_loss.item()
print('Validation loss: %f' % (total_running_loss / (batch_idx + 1)))
self.model.train(True)
del X_batch, y_batch
return total_running_loss/(batch_idx + 1)
def predict(self, dataset, export_path, channel=None):
self.model.train(False)
chk_mkdir(export_path)
for batch_idx, (X_batch, image_filename) in enumerate(DataLoader(dataset, batch_size=1)):
if self.cuda_device:
X_batch = Variable(X_batch.to(device=self.cuda_device))
y_out = self.model(X_batch).cpu().data.numpy()
else:
X_batch = Variable(X_batch)
y_out = self.model(X_batch).data.numpy()
if channel:
try:
io.imsave(os.path.join(export_path, image_filename[0]), y_out[0, channel, :, :])
                except Exception as e:
                    print('saving the prediction for %s failed: %s' % (image_filename[0], e))
else:
try:
io.imsave(os.path.join(export_path, image_filename[0]), y_out[0, :, :, :].transpose((1, 2, 0)))
                except Exception as e:
                    print('saving the prediction for %s failed: %s' % (image_filename[0], e))
def predict_large_images(self, dataset, export_path=None, channel=None, tile_res=(512, 512)):
self.model.train(False)
if export_path:
chk_mkdir(export_path)
else:
results = []
for batch_idx, (X_batch, image_filename) in enumerate(DataLoader(dataset, batch_size=1)):
out = self.predict_single_large_image(X_batch, channel=channel, tile_res=tile_res)
if export_path:
io.imsave(os.path.join(export_path, image_filename[0]), out)
else:
results.append(out)
if not export_path:
return results
def predict_single_large_image(self, X_image, channel=None, tile_res=(512, 512)):
image_res = X_image.shape
# placeholder for output
y_out_full = np.zeros(shape=(1, 3, image_res[2], image_res[3]))
# generate tile coordinates
tile_x = list(range(0, image_res[2], tile_res[0]))[:-1] + [image_res[2] - tile_res[0]]
tile_y = list(range(0, image_res[3], tile_res[1]))[:-1] + [image_res[3] - tile_res[1]]
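        # e.g. a 1200-px dimension with 512-px tiles gives offsets [0, 512, 688]: the last
        # tile is re-anchored to the image edge so the border is covered, and overlapping
        # pixels are simply overwritten by the later tile.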
tile = product(tile_x, tile_y)
# predictions
for slice in tile:
if self.cuda_device:
X_in = X_image[:, :, slice[0]:slice[0] + tile_res[0], slice[1]:slice[1] + tile_res[1]].to(
device=self.cuda_device)
X_in = Variable(X_in)
else:
X_in = X_image[:, :, slice[0]:slice[0] + tile_res[0], slice[1]:slice[1] + tile_res[1]]
X_in = Variable(X_in)
y_out = self.model(X_in).cpu().data.numpy()
y_out_full[0, :, slice[0]:slice[0] + tile_res[0], slice[1]:slice[1] + tile_res[1]] = y_out
# save image
if channel:
out = y_out_full[0, channel, :, :]
else:
out = y_out_full[0, :, :, :].transpose((1, 2, 0))
return out
def measure_large_images(self, dataset, visualize_bboxes=False, filter=True, export_path=None,
skeleton_method=skeletonize_3d, dpm=False, verbose=False, tile_res=(512, 512)):
hypocotyl_lengths = dict()
chk_mkdir(export_path)
assert any(isinstance(dpm, tp) for tp in [str, bool, Number]), 'dpm must be string, bool or Number'
for batch_idx, (X_batch, image_filename) in enumerate(DataLoader(dataset, batch_size=1)):
if verbose:
print("Measuring %s" % image_filename[0])
hypo_segmented = self.predict_single_large_image(X_batch, tile_res=tile_res)
hypo_segmented_mask = hypo_segmented[:, :, 2]
hypo_result, hypo_skeleton = get_hypo_rprops(hypo_segmented_mask, filter=filter, return_skeleton=True,
skeleton_method=skeleton_method,
dpm=dpm)
hypo_df = hypo_result.make_df()
hypocotyl_lengths[image_filename] = hypo_df
if export_path:
if visualize_bboxes:
hypo_img = X_batch[0].cpu().data.numpy().transpose((1, 2, 0))
# original image
visualize_regions(hypo_img, hypo_result,
os.path.join(export_path, image_filename[0][:-4] + '.png'))
# segmentation
visualize_regions(hypo_segmented, hypo_result,
os.path.join(export_path, image_filename[0][:-4] + '_segmentation.png'),
bbox_color='0.5')
# skeletonization
visualize_regions(hypo_skeleton, hypo_result,
os.path.join(export_path, image_filename[0][:-4] + '_skeleton.png'))
hypocotyl_lengths[image_filename].to_csv(os.path.join(export_path, image_filename[0][:-4] + '.csv'),
header=True, index=True)
return hypocotyl_lengths
def score_large_images(self, dataset, export_path, visualize_bboxes=False, visualize_histograms=False,
visualize_segmentation=False,
filter=True, skeletonized_gt=False, match_threshold=0.5, tile_res=(512, 512),
dpm=False):
chk_mkdir(export_path)
scores = {}
assert any(isinstance(dpm, tp) for tp in [str, bool, Number]), 'dpm must be string, bool or Number'
if isinstance(dpm, str):
dpm_df = pd.read_csv(dpm, header=None, index_col=0)
for batch_idx, (X_batch, y_batch, image_filename) in enumerate(DataLoader(dataset, batch_size=1)):
if isinstance(dpm, str):
dpm_val = dpm_df.loc[image_filename].values[0]
elif isinstance(dpm, Number) or dpm == False:
dpm_val = dpm
else:
raise ValueError('dpm must be str, Number or False')
# getting filter range
if isinstance(filter, dict):
filter_val = filter[image_filename[0]]
else:
filter_val = filter
segmented_img = self.predict_single_large_image(X_batch, tile_res=tile_res)
hypo_result_mask = segmented_img[:, :, 2]
hypo_result, hypo_result_skeleton = get_hypo_rprops(hypo_result_mask, filter=filter_val,
return_skeleton=True, dpm=dpm_val)
hypo_result.make_df().to_csv(os.path.join(export_path, image_filename[0][:-4] + '_result.csv'))
if visualize_segmentation:
io.imsave(os.path.join(export_path, image_filename[0][:-4] + '_segmentation_skeletons.png'),
img_as_uint(hypo_result_skeleton))
io.imsave(os.path.join(export_path, image_filename[0][:-4] + '_segmentation_hypo.png'),
hypo_result_mask)
io.imsave(os.path.join(export_path, image_filename[0][:-4] + '_segmentation_full.png'),
segmented_img)
if not skeletonized_gt:
hypo_gt_mask = y_batch[0].data.numpy() == 2
else:
hypo_gt_mask = y_batch[0].data.numpy() > 0
hypo_result_gt = get_hypo_rprops(hypo_gt_mask, filter=[20/dpm_val, np.inf],
already_skeletonized=skeletonized_gt, dpm=dpm_val)
hypo_result_gt.make_df().to_csv(os.path.join(export_path, image_filename[0][:-4] + '_gt.csv'))
scores[image_filename[0]], objectwise_df = hypo_result.score(hypo_result_gt,
match_threshold=match_threshold)
objectwise_df.to_csv(os.path.join(export_path, image_filename[0][:-4] + '_matched.csv'))
# visualization
# histograms
if visualize_histograms:
hypo_result.hist(hypo_result_gt,
os.path.join(export_path, image_filename[0][:-4] + '_hist.png'))
# bounding boxes
if visualize_bboxes:
visualize_regions(hypo_gt_mask, hypo_result_gt,
export_path=os.path.join(export_path, image_filename[0][:-4] + '_gt.png'))
visualize_regions(hypo_result_skeleton, hypo_result,
export_path=os.path.join(export_path, image_filename[0][:-4] + '_result.png'))
score_df = | pd.DataFrame(scores) | pandas.DataFrame |
import os
import re
import json
import numpy as np
import pandas as pd
import operator
import base64
os.environ['DJANGO_SETTINGS_MODULE'] = 'zazz_site.settings'
import django
django.setup()
from django.core.exceptions import ObjectDoesNotExist
from django.core import serializers
from zazz import models
from time import gmtime, strftime
from functools import reduce
from itertools import product
from collections import OrderedDict, defaultdict
from zazz.models import Samples
print ('OFFLINE:')
g = {
}
import_errors = defaultdict(int)
'''
class Mutations(models.Model):
vep = models.ManyToManyField(to="VEP")
name = models.CharField(null=False, max_length=100)
alternative = models.CharField(null=True, max_length=100)
reference = models.CharField(null=True, max_length=100)
this_type = models.CharField(null=False, choices=[('name', 'GENERIC'), ('rs_name', 'rs'), ('hgvs_name', 'hgvs')], max_length=100)
'''
class ZazzException(Exception):
def set_info(self, info):
self.info = info
def convert_to_base64(s):
return base64.b64encode(bytes(s, encoding='ascii')).decode()
def decode_base64_json(s):
return json.loads(base64.b64decode(s.replace('_', '=')))
def print_now():
return strftime("%Y-%m-%d %H:%M:%S", gmtime())
def get_model(name):
return getattr(models, name)
def create_field_parameters(parameters):
return ', '.join(['{k} = {v}'.format(k=k,v=v) for k,v in parameters.items()])
def create_field(field):
# if field['type'] in ['MultiSelectField']:
# this_models = ''
# else:
# this_models = 'models.'
this_models = 'models.'
return ' {name} = {this_models}{type_}({parameters})'.format(
name=field['name'].replace(' ', '_'),
this_models=this_models,
type_ = field['type'],
parameters = create_field_parameters(field['parameters']),
)
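# e.g. the Clinvar field {'name': 'Clinical Significance', 'type': 'CharField',
# 'parameters': {'max_length': '100'}} renders (with its leading indentation) as
# "Clinical_Significance = models.CharField(max_length = 100)".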
def create_fields(fields):
return '\n'.join([create_field(field) for field in fields])
def get_table_pattern():
table_pattern = '''
class {table}(models.Model):
{meta_val}
{fields_val}
'''
return table_pattern
def table_pattern_f(table, fields_val, meta_val=''):
table_pattern = get_table_pattern()
return table_pattern.format(table=table, fields_val=fields_val, meta_val=meta_val)
def create_external(external):
#Create main
table = external['name']
#fields_keys = [x for x in external['fields'] if x['name'] in external['keys']]
fields_keys = external['fields']
fields_val = create_fields(fields_keys)
ret = table_pattern_f(table=table, fields_val=fields_val)
#Create secondary
return ret
def create_externals(externals):
'''
externals = [
{'name': 'Clinvar', 'filename': 'clinvar.csv', 'type': 'csv', 'fields':
[
{'name': 'Chromosome', 'type': 'CharField', 'parameters': {'max_length': '100'}},
{'name': 'Position', 'type': 'IntegerField', 'parameters': {}},
{'name': 'Clinical Significance', 'type': 'CharField', 'parameters': {'max_length': '100'}},
],
'keys': ['Chromosome', 'Position'],
},
]
'''
return '\n'.join(map(create_external, externals))
def create_table(table, fields, externals):
'''
table: Name of main table
fields: list of fields that describe the database
'''
Many2ManyTables = {}
for field in fields:
#if field.get('table', False):
if field.get('database', False) == 'multi_1':
f_table = field['table']
if not f_table in Many2ManyTables:
Many2ManyTables[f_table] = []
Many2ManyTables[f_table].append(field)
'''
Many2ManyTables is a dictionary.
keys: names of the tables under which we group fields together
values: a list of these fields
'''
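    # e.g. fields tagged database='multi_1' and table='Transcripts' are grouped into one
    # Transcripts model; the main table then gets a ManyToManyField(to='Transcripts') plus
    # a CharField '<field>_raw' for every grouped field.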
# Transform Many2ManyTables to django tables format
Many2ManyTables_text = '\n'.join([table_pattern_f(k,create_fields(v)) for k,v in Many2ManyTables.items()])
# Add the "normal" fields (not Many2Many)
new_fields = [field for field in fields if field.get('database', False) != 'multi_1']
#Add fields for ManyToMany
#The main table needs to have a ManytoMany relationship with the Samples table
new_fields += [{'name': k, 'type': 'ManyToManyField', 'parameters': {'to': k}} for k,v in Many2ManyTables.items()]
#We also need to add a "raw" field for each many2many relationship
#We may have to remove this on the furture!
for k,v in Many2ManyTables.items():
for f in v:
# f = {'name': 'Sift', 'col_name': 'sift', 'type': 'FloatField', 'parameters': {'null': 'True'}, 'xUnits': 20, 'database': 'multi', 'l': <function import_annotated_vcf.<locals>.<lambda> at 0x116418488>, 'l_multi': <function splitUnique.<locals>.f at 0x116418510>, 'table': 'Transcripts', 'order': 21}
#print (f)
field_to_add = dict(f)
# All raw fields should be CharFields !
if field_to_add['type'] != 'CharField':
field_to_add['type'] = 'CharField'
field_to_add['parameters']['max_length'] = '200'
field_to_add['name'] += '_raw'
new_fields.append(field_to_add)
# Create a multi field index
meta_val = '''
class Meta:
indexes = [
models.Index(
fields=['Chromosome', 'Position', 'Reference', 'Alternative'],
name='sample_idx',
),
]
'''
table_text = table_pattern_f(table=table, fields_val = create_fields(new_fields), meta_val=meta_val)
# print (table_text)
# a=1/0
models_pattern = '''
from django.db import models
# from multiselectfield import MultiSelectField
# Create your models here.
{Many2ManyTables}
class Data(models.Model):
field = models.CharField(null=True, max_length=200)
{table}
{externals}
'''
externals_text = create_externals(externals)
models_text = models_pattern.format(table=table_text, Many2ManyTables=Many2ManyTables_text, externals=externals_text)
print ('NEW MODELS:')
print (models_text)
print ('Saving to zazz/models.py..')
with open('zazz/models.py', 'w') as f:
f.write(models_text)
print ('..DONE')
print ('Running: python manage.py makemigrations ...')
command = 'python manage.py makemigrations zazz'
os.system(command)
print (' ..DONE')
print ('Running: python manage.py migrate')
command = 'python manage.py migrate'
os.system(command)
print(' ..DONE')
#print (Data.objects.all())
#df = pd.read_excel('annotations_zaganas.xlsx')
#print (df[:3])
#print ()
#print ("python manage.py makemigrations")
#print ("python manage.py migrate")
def create_js_field(field):
'''
IGNORE = DO NOT SHOW IN UI
'''
pattern = "{{'name': '{name}', 'type': '{type}', 'selected': false, 'e_order': -1, 'database': '{database}', {special}{renderer}{table}{xUnits}{order}{include} }}"
database = field.get('database', 'normal');
xUnits = ''
if field.get('component') == 'freetext':
type_ = 'freetext'
special = "'text' : ''" # The ng-model
elif field.get('component') == 'ignore':
type_ = 'ignore'
special = "'text' : ''" # The ng-model
elif field['type'] in ['CharField', 'ManyToManyField']:
type_ = 'checkbox'
special = "'itemArray': [], 'selected2': ['ALL']"
elif field['type'] in ['IntegerField', 'FloatField']:
type_ = 'slider'
special = ''''slider': {
'min': 30,
'max': 70,
'options': {
'floor': 1,
'ceil': 100,
'disabled': true,
'onEnd' : function (sliderId, modelValue, highValue, pointerType) {
console.log('Slider changed');
//console.log(modelValue); // This the min
//console.log(highValue); // This is the max
$scope.update_table();
}
},
}'''
if field['type'] == 'IntegerField':
if not 'xUnits' in field:
				raise ZazzException('xUnits missing from IntegerField')
xUnits = ", 'xUnits': " + str(field['xUnits'])
elif field['type'] == 'ForeignKey':
type_ = 'checkbox'
special = "'itemArray': [], 'selected2': ['ALL']"
else:
raise ZazzException('Unknown field: {}'.format(field['type']))
if 'renderer' in field:
renderer = ", 'renderer': " + field['renderer']
else:
renderer = ''
if 'table' in field:
table = ", 'table': '" + field['table'] + "'"
else:
table = ''
if 'order' in field:
order = ", 'order': " + str(field['order'])
else:
order = ''
if 'include' in field:
include = ", 'include': " + str(field['include'])
else:
include = ''
values = {
'name': field['name'],
'type': type_,
'special': special,
'database': database,
'renderer': renderer,
'table': table,
'order': order,
'include': include,
'xUnits': xUnits,
}
return pattern.format(**values)
def create_js_fields(fields):
return ',\n'.join([create_js_field(x) for x in fields])
def create_js(fields):
'''
$scope.fields = [
//{'name': 'sample', 'type': 'checkbox', 'selected': false, 'itemArray': [{id: 1, name: ''}], 'selected2': {'value': {id: 1, name: ''}} },
{'name': 'sample', 'type': 'checkbox', 'selected': false, 'itemArray': [], 'selected2': ['ALL'], 'e_order': -1 },
{'name': 'Bases', 'type': 'slider', 'selected': false, 'slider': {
'min': 30,
'max': 70,
'options': {
'floor': 1,
'ceil': 100,
'disabled': true,
'onEnd' : function (sliderId, modelValue, highValue, pointerType) {
console.log('Slider changed');
//console.log(modelValue); // This the min
//console.log(highValue); // This is the max
$scope.update_table();
}
},
},
'e_order': -1},
{'name':'Barcode_Name', 'type':'checkbox', 'selected': false, 'itemArray': [], 'selected2': ['ALL'], 'e_order': -1 }
];
'''
print ('JAVASCRIPT:')
fields_val = f'$scope.fields=[{create_js_fields(fields)}];'
print (fields_val)
# Add fields javascript object in angular controller
z_zazz_ctrl_fn = 'zazz/static/zazz/zazz_Ctrl.js'
with open(z_zazz_ctrl_fn) as f:
z_zazz_ctrl = f.read()
z_zazz_ctrl_new = re.sub(
r'// FIELDS BEGIN\n.+\n// FIELDS END\n',
f'// FIELDS BEGIN\n{fields_val}\n// FIELDS END\n',
z_zazz_ctrl,
flags=re.DOTALL )
with open(z_zazz_ctrl_fn, 'w') as f:
f.write(z_zazz_ctrl_new + '\n')
	print ('Saved javascript at:', z_zazz_ctrl_fn)
def is_dataframe(data):
'''
Return true if data is a pandas dataFrame
'''
return type(data) is pd.DataFrame
def chromosome_unifier(chromosome):
'''
All chromosome input should pass from this function.
Chromosome can be declared in multiple ways.. "1", chr1, chr01, ...
Here we make sure that all chromosome values are in the form chr1, chr2, chrX, chrY
'''
# "15" --> chr15
if re.match(r'^\d+$', chromosome):
return 'chr' + chromosome
if re.match(r'^chr[\dXY]+$', chromosome):
return chromosome
if chromosome.upper() in ['X', 'Y']:
		return 'chr' + chromosome.upper()
raise ZazzException(f'Unknown Chromosome value: ->{chromosome}<-')
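# Behaviour sketch (example values only, not taken from the original data):
#
#   chromosome_unifier('15')    # -> 'chr15'
#   chromosome_unifier('chr3')  # -> 'chr3'  (already in the expected form)
#   chromosome_unifier('X')     # -> 'chrX'
#   chromosome_unifier('MT')    # raises ZazzException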
def get_value_from_record(field, record, line_index):
'''
Extract the value that is present in the record and is described in the field
field : Any item in fields list. field is a dictionary
record: Any item in input data.
DUPLICATE CODE!!
FIX ME!!
'''
if not field['col_name'] in record:
message = '{} does not exist in record\n'.format(field['col_name'])
message += 'Available columns:\n'
message += '\n'.join(record.keys()) + '\n'
raise ZazzException(message)
try:
if 'line_l' in field:
value = field['line_l'](record)
elif 'l' in field:
value = field['l'](record[field['col_name']])
else:
value = record[field['col_name']]
except ZazzException as t_exception:
e_message = str(t_exception)
e_info = t_exception.info
import_errors[e_message] += 1
value = None
except Exception as e:
print ('Record:')
print (record)
print ('Index:', line_index)
raise e
return value
def get_key_from_record(field):
'''
Get the name of the key of the record
'''
key = field['name']
if field.get('database', '') == 'multi_2':
pass
elif field.get('database', '') == 'multi_1':
key = field['name'] + '_raw'
return key
def create_m2m_table(schema, table):
'''
Create a dictionary with all the Many2Many tables.
Example: {'phylop', 'pfam', 'drugbank', 'go', 'dbsnp', 'omim', 'cosmic', 'Transcripts'}
key: multi_1 table
values: list with all column names.
'''
m2m_tables = defaultdict(list)
for field in schema:
if field.get('database', '') == 'multi_1':
#m2m_tables.add(field.get('table', table))
m2m_tables[field.get('table', table)].append(field)
return m2m_tables
def get_multi_1_records(m2m_tables, record, ):
'''
example of field:
{'name': 'ANN_GeneDetail_refGene', 'col_name': 'GeneDetail.refGene', 'type': 'CharField', 'parameters': {'max_length': '500', 'null': 'True'}, 'database': 'multi_1', 'table': 'ANN_GeneDetail_refGene', 'l_multi': lambda x : x.replace('\\x3d', '=').split('\\x3b'), 'order': 38},
Returns:
ret:
{
'nameof_m2m_tale' : {
m2m_field_1: [list of values],
m2m_field_2: [list of values],
}
}
ret_raw:
{
'nameof_m2m_tale' : {
m2m_field_1: raw_values,
m2m_field_2: raw_values,
}
}
'''
ret = defaultdict(dict)
ret_raw = defaultdict(dict)
for m2m_table_key, m2m_table_value in m2m_tables.items():
for field in m2m_table_value:
#print ('*** FIELD: ***')
#print (field)
unsplitted = record[field['col_name']]
splited_values = field['l_multi'](unsplitted)
ret[m2m_table_key][field['name']] = splited_values
if 'l_raw_multi' in field:
ret_raw[m2m_table_key][field['name'] + '_raw'] = field['l_raw_multi'](splited_values)
else:
ret_raw[m2m_table_key][field['name'] + '_raw'] = unsplitted
#print (ret)
#a=1/0
return ret, ret_raw
def create_attribute_records(record_list):
'''
record_list:
{'k': [1,2,3], 'l': [4,5,6]}
RETURNS:
[{'k': 1, 'l': 4}, {'k': 2, 'l': 5}, {'k':3, 'l': 6}]
'''
return [dict(zip(record_list.keys(), x)) for x in zip(*record_list.values())]
def import_data_append(input_data, schema, table, externals, **kwargs):
'''
Append new data
kwargs:
to_append_re : Regular expression to match new field names
'''
# Get kwargs
to_append_re = kwargs.get('to_append_re', None)
assert to_append_re
# Get table
table_db = getattr(models, table)
# Check type of input data
if is_dataframe(input_data):
data = input_data.to_dict('records')
elif type(input_data) is dict:
data = input_data
else:
raise ZazzException('input_data is not a pandas dataframe or a dictionary')
#Get the new fields that we will add.
print ('Selecting only fields according to regexp: {}'.format(to_append_re))
print ('Total fields: {}'.format(len(schema)))
fields = [field for field in schema if re.match(to_append_re, field['name'])]
print ('Fields after selection: {}'.format(len(fields)))
assert len(fields)
print ('APPENDING NEW FIELDS:')
for field in fields:
print (' ' + field['name'])
# Get m2m_table:
m2m_tables = create_m2m_table(fields, table)
#print (m2m_tables)
#a=1/0
this_error = defaultdict(int)
for line_index, record in enumerate(data):
#print (line_index, record['# locus'])
if (line_index+1) % 1000 == 0:
print ('{} Imported records: {}/{} {:.1%}'.format(print_now(), line_index+1, len(data), line_index/len(data)))
try:
database_record = table_db.objects.get(Position=record['Position'], Chromosome=record['Chromosome'], Reference=record['Reference'], Alternative=record['Alternative'])
except ObjectDoesNotExist as e:
this_error['Could not find chromosome/position in db'] += 1
continue
for field in fields:
value = get_value_from_record(field, record, line_index)
key = get_key_from_record(field)
#print ('{}={}'.format(field['name'], value))
setattr(database_record, key, value)
#database_record.save()
# Get multi_1 records:
#print ('GeneDetail.refGene = ', record['GeneDetail.refGene'])
multi_1_records, multi_1_records_raw = get_multi_1_records(m2m_tables, record)
#print ('*** multi_1_records: ***')
#print (multi_1_records)
#print ('*** multi_1_records_raw: ***')
#print (multi_1_records_raw)
# Store multi records
for m2m_table_key, m2m_table_value in m2m_tables.items():
for field in m2m_table_value:
# Add raw multi_1 records
setattr(database_record, field['name'] + '_raw', multi_1_records_raw[m2m_table_key][field['name'] + '_raw'])
#print (database_record)
#print (field['name'] + '_raw')
#print (multi_1_records[m2m_table_key][field['name'] + '_raw'])
#Create attribute dictionary
attribute_records = create_attribute_records(multi_1_records[m2m_table_key])
#print ('*** attribute_records ***')
#print (attribute_records)
m2m_objects = [getattr(models, m2m_table_key).objects.get_or_create(**attribute_record)[0] for attribute_record in attribute_records]
getattr(getattr(database_record, m2m_table_key), 'set')(m2m_objects)
database_record.save()
	print ('IMPORT ERRORS:')
print (json.dumps(this_error, indent=4))
def import_data(input_data, schema, table, externals, delete=True, **kwargs):
'''
model_instances = [MyModel(
field_1=record['field_1'],
field_2=record['field_2'],
) for record in df_records]
'''
# Make sure that there is one and only one of the basic keys
chromosome_field = [x for x in schema if x['name'] == 'Chromosome']
position_field = [x for x in schema if x['name'] == 'Position']
reference_field = [x for x in schema if x['name'] == 'Reference']
alternative_field = [x for x in schema if x['name'] == 'Alternative']
assert len(chromosome_field) == 1
assert len(position_field) == 1
assert len(reference_field) == 1
assert len(alternative_field) == 1
chromosome_field = chromosome_field[0]
position_field = position_field[0]
reference_field = reference_field[0]
alternative_field = alternative_field[0]
errors_1 = 0
print ('Importing externals..')
if delete:
print ('Deleting external --> internal')
for external in externals:
if external['type'] == 'internal':
print (' Deleting external --> internal table: {}'.format(external['name']))
get_model(external['name']).objects.all().delete()
print (' Done')
print ('Deleting externals')
for external in externals:
if external['type'] == 'csv':
print (' Deleting external table: {}'.format(external['name']))
get_model(external['name']).objects.all().delete()
print (' Done')
if False:
'''
This is an initial effort. It is too slow.
It stores all info in DB. This is inefficient if we only need a fraction of information
'''
print ('Importing External Data')
for external in externals:
if external['type'] == 'csv':
print (' Name: {}'.format(external['name']))
print (' Loading file: {}'.format(external['filename']))
csv = pd.read_csv(external['filename'])
csv_dict = csv.to_dict('index')
print (' Length: {}'.format(len(csv_dict)))
c = 0
for index, d in csv_dict.items():
c += 1
if c % 1000 == 0:
print (' {}, Records: {}'.format(print_now(), c))
if c > 1000:
break
#Build a dictionary with the fields. NO M2M
item_fields_no_m2m = {field['name']:field['l'](d) for field in external['fields'] if not field['type'] == 'ManyToManyField'}
new_item = get_model(external['name']).objects.get_or_create(**item_fields_no_m2m)[0]
#new_item.save()
# Build a dictionary with fields. WITH M2M
for field in external['fields']:
if field['type'] != 'ManyToManyField':
continue
item_fields_m2m = {field['name']:field['l'](d) for field in external['fields'] if field['type'] == 'ManyToManyField'}
for m2m_k, m2m_v in item_fields_m2m.items():
getattr(new_item, m2m_k).add(m2m_v)
new_item.save()
elif external['type'] == 'internal':
continue
print (' Done')
if is_dataframe(input_data):
df = input_data
elif type(input_data) is str:
input_data_ext = os.path.splitext(input_data)[1]
if input_data_ext == '.xlsx':
			print ('Reading MAIN Excel: {}'.format(input_data))
			df = pd.read_excel(input_data)
else:
raise Exception('Unknown file type: ', input_data_ext )
else:
raise Exception('Unknown input type', type(input_data).__name__)
if False:
print ('Keeping only 1000 records')
df = df[:1000]
data = df.to_dict('records')
table_db = getattr(models, table)
if delete:
print ('Deleting all..')
print ('Deleting table.. ', table)
table_db.objects.all().delete()
# Get the new fields that we will add.
to_append_re = kwargs.get('to_append_re')
if to_append_re:
print ('Adding only fields that match regexp: {}'.format(to_append_re))
print ('Total fields: {}'.format(len(schema)))
schema = [field for field in schema if re.match(to_append_re, field['name'])]
# Add basic fields as well
schema.extend([chromosome_field, position_field, reference_field, alternative_field])
print ('After regexp: {}'.format(len(schema)))
m2m_tables = set()
for field in schema:
if field.get('database', '') == 'multi_1':
m2m_tables.add(field.get('table', table))
if delete:
for m2m_table in m2m_tables:
print ('Deleting table.. ', m2m_table)
mm_db = getattr(models, m2m_table)
mm_db.objects.all().delete()
#(field['line_l'](record)) if 'line_l' in field else (field.get('l', lambda l:l)(record[field['col_name']]))
print ('Building instances..')
if False:
instances = [
table_db(**{
field['name'] + ('_raw' if field.get('table', table) != table else ''):
(field['line_l'](record)) if 'line_l' in field else (field.get('l', lambda l:l)(record[field['col_name']])) #(field['l'] if 'l' in field else lambda x:x)(record[field['col_name']])
for field in schema if 'col_name' in field # Add only fields that have col_name.
}) for record in data] # for field in schema if not field['type'] == 'ManyToManyField'}) for record in data]
def create_multi_dictionary():
'''
Create multi dictionary for multi_2
'''
multi_dictionary = defaultdict(list)
for field in schema:
if field.get('database', False) == 'multi_2':
multi_dictionary[field['table']].append(field)
return multi_dictionary
multi_dictionary = create_multi_dictionary()
def create_multi_record(index, record):
all_multi_value_lists = []
for multi_key, multi_fields in multi_dictionary.items():
#Get the values of each multi field
multi_values_values = []
multi_values_keys = []
for multi_field in multi_fields:
field_value = record[multi_field['col_name']]
field_value_splitted = multi_field['l_multi'](field_value)
multi_values_keys.append(multi_field['name'])
multi_values_values.append(field_value_splitted)
# Make sure that all lists has the same number of values
set_of_the_length_of_all_values = set(map(len, multi_values_values))
if len(set_of_the_length_of_all_values) != 1:
#error_message = 'Index: {} . Fields do not have the same size..'.format(index)
error_message = 'Multi fields do not have the same size..'
import_errors[error_message] += 1
print (error_message)
return None
#print ('multi_values_values:')
#print (multi_values_values)
#print ('multi_values_keys')
#print (multi_values_keys)
multi_values_list_of_dicts = [dict(zip(multi_values_keys,x)) for x in zip(*multi_values_values)]
# [{'gene': 'NBPF9', 'transcript': 'NM_001037675.3', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '7', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NM_001037501.2', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '6', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NR_102404.1', 'location': 'exonic_nc', 'function': None, 'codon': None, 'exon': '6', 'protein': None, 'coding': None, 'sift': None}, {'gene': 'NBPF8', 'transcript': 'NR_102405.1', 'location': 'exonic_nc', 'function': None, 'codon': None, 'exon': '5', 'protein': None, 'coding': None, 'sift': None}, {'gene': 'NBPF9', 'transcript': 'NM_001277444.1', 'location': 'exonic', 'function': 'missense', 'codon': 'CGC', 'exon': '7', 'protein': 'p.His295Arg', 'coding': 'c.885A>G', 'sift': None}]
#print (multi_values_list_of_dicts)
all_multi_value_lists.append(multi_values_list_of_dicts)
# Combine multiple values
#print (reduce(lambda x,y: x*y, all_multi_value_lists))
if not all_multi_value_lists:
return None
ret = [dict(reduce(operator.or_, [y.items() for y in x])) for x in product(*all_multi_value_lists)]
#print ('Multivalues:', len(ret))
#print (ret)
return ret
if True:
instances = []
for line_index, record in enumerate(data):
#print (line_index, record['# locus'])
if (line_index+1) % 1000 == 0:
print ('{} Imported records: {}/{} {:.1%}'.format(print_now(), line_index+1, len(data), line_index/len(data)))
table_db_options = {}
for field in schema:
if not 'col_name' in field: # Add only fields that have col_name.
continue
key = field['name']
if field.get('database', '') == 'multi_2':
continue # Later add multi_2 fields
elif field.get('database', '') == 'multi_1':
key = field['name'] + '_raw'
try:
if 'line_l' in field:
value = field['line_l'](record)
elif 'l' in field:
# col_name might not exist in record! Data is not supposed to contain all fields!
if not field['col_name'] in record:
continue
value = field['l'](record[field['col_name']])
else:
# col_name might not exist in record! Data is not supposed to contain all fields!
if not field['col_name'] in record:
continue
value = record[field['col_name']]
except ZazzException as t_exception:
e_message = str(t_exception)
e_info = t_exception.info
import_errors[e_message] += 1
value = None
except Exception as e:
print ('Record:')
print (record)
print ('Index:', line_index)
raise e
if pd.isnull(value):
value = None # np.nan confuses django when attempting: int(np.nan)
table_db_options[key] = value
multi_records = create_multi_record(line_index, record)
if multi_records:
for multi_record in multi_records:
table_db_options = {**table_db_options, **multi_record}
instances.append(table_db(**table_db_options))
else:
#print (table_db_options)
instances.append(table_db(**table_db_options))
count = len(instances)
print ('Adding IDs..')
for i, instance in enumerate(instances):
instance.id = i
print ('{} Bulk creating main objects..'.format(print_now()))
# bulk_create does not work with many-to-many relationships. ..sniff...
# https://docs.djangoproject.com/en/2.0/ref/models/querysets/
if False:
'''
For testing
'''
print (serializers.serialize("json", instances, indent=4))
for inst in instances:
inst.save()
print (inst.pk)
if True:
table_db.objects.bulk_create(instances)
print (' {} Done'.format(print_now()))
print ('Indexing main objects..')
querySet = table_db.objects.filter(id__gte=0, id__lt=count)
assert querySet.count() == count
index = {x.id:x for x in querySet}
m2m_index = {}
print ('Creating many to many relationships..')
#errors_1 = 0
def process_multi_1(store):
errors_1 = 0
# m2m_objects: store in memory ALL m2m object, so that we can bulk import them later
m2m_objects = defaultdict(list)
# For each record store which many to many has
m2m_object_references = defaultdict(dict)
for id_, record in enumerate(data):
instance = index[id_]
if id_ % 1000 == 0:
print ('{} Entries: {}/{}'.format(print_now(), id_+1, count))
#l_multi is obligatory
for m2m_table in m2m_tables:
try:
# field['col_name'] in record : col_name does not have to be present in record!
m2m_fields = OrderedDict({field['name']: field['l_multi'](record[field['col_name']]) for field in schema if field.get('table', None) == m2m_table and field['col_name'] in record})
except ZazzException as e:
import_errors[str(e)] += 1
print (str(e))
m2m_fields = {}
#assert that all have the same length
if not len(set(len(x) for x in m2m_fields.values())) == 1:
print ('Index: {} . Fields do not have the same size..'.format(id_))
debug = {field['name']: record[field['col_name']] for field in schema if field.get('table', None) == m2m_table and field['col_name'] in record}
#print (debug)
#print (m2m_fields)
errors_1 += 1
m2m_fields = {}
#raise Exception()
#Create database objects
# {a: [1,2] , b: [3,4]} --> [{a:1, b:3} , {a:2, b:4}]. See also create_attribute_records()
m2m_fields = [dict(zip(m2m_fields.keys(), x)) for x in zip(*m2m_fields.values())]
current_length = len(m2m_objects[m2m_table])
m2m_objects[m2m_table].extend(m2m_fields)
m2m_object_references[id_][m2m_table] = (current_length, current_length+len(m2m_fields))
# m2m_fields: [{'Gene': 'CLCNKB', 'Transcript': 'NM_000085.4'}, {'Gene': 'CLCNKB', 'Transcript': 'NM_001165945.2'}]
if not m2m_fields:
# Do nothing.
#getattr(getattr(instance, m2m_table), 'set')(None)
#instance.save()
continue
if False:
'''
Always create new multi object
'''
m2m_objects = [getattr(models, m2m_table)(**m2m_field) for m2m_field in m2m_fields]
#Save objects
for o in m2m_objects:
o.save()
if False:
'''
Create only if they don't exist
'''
m2m_objects = [getattr(models, m2m_table).objects.get_or_create(**m2m_field)[0] for m2m_field in m2m_fields]
if store:
'''
Create only if they don't exist
'''
					m2m_objects = [getattr(models, m2m_table).objects.get_or_create(**m2m_field)[0] for m2m_field in m2m_fields]
#print (m2m_table, m2m_fields)
#Add it to the main instance
if False:
getattr(getattr(instance, m2m_table), 'set')(m2m_objects)
if store:
#Save instance
instance.save()
return m2m_objects, m2m_object_references
m2m_objects, m2m_object_references = process_multi_1(store=False)
print ('Bulk creating Many2Many Objects')
table_insance_objects = {}
for m2m_table, m2m_values in m2m_objects.items():
print (' Bulk creating:', m2m_table)
table_instance = getattr(models, m2m_table)
table_insance_objects[m2m_table]= [table_instance(**x) for x in m2m_values]
getattr(models, m2m_table).objects.bulk_create(table_insance_objects[m2m_table])
print (' Getting Primary Key of:', m2m_table)
table_insance_objects[m2m_table] = table_instance.objects.all().order_by('pk')
print ('Connecting main instance with m2m..')
#Create through objects
through_objects = {m2m_table: getattr(Samples, m2m_table).through for m2m_table in m2m_tables}
for id_, record in enumerate(data):
if id_ % 1000 == 0:
print ('{} {}/{}'.format(print_now(), id_, len(data)))
instance = index[id_]
#
if not id_ in m2m_object_references:
continue
for table_name, table_indexes in m2m_object_references[id_].items():
#print (table_insance_objects[table_name][table_indexes[0]: table_indexes[1]+1])
if True:
'''
2019-04-18 16:09:42 0/10000
2019-04-18 16:10:15 1000/10000 --> 33
2019-04-18 16:10:48 2000/10000 --> 33
2019-04-18 16:11:22 3000/10000 --> 34
2019-04-18 16:11:57 4000/10000 --> 35
2019-04-18 16:12:33 5000/10000 --> 36
'''
getattr(getattr(instance, table_name), 'set')(table_insance_objects[table_name][table_indexes[0]: table_indexes[1]+1])
if False:
'''
2019-04-18 16:05:47 0/10000
2019-04-18 16:06:14 1000/10000 --> 27
2019-04-18 16:06:43 2000/10000 --> 29
2019-04-18 16:07:13 3000/10000 --> 30
2019-04-18 16:07:48 4000/10000 --> 35
2019-04-18 16:08:27 5000/10000 --> 39
'''
tmp1 = [{table_name.lower() + '_id': table_insance_objects[table_name][i].pk, 'samples_id': instance.pk} for i in range(table_indexes[0], table_indexes[1]+1)]
#print (tmp1)
tmp2 = [through_objects[table_name](**x) for x in tmp1]
#print (tmp2)
through_objects[table_name].objects.bulk_create(tmp2)
instance.save()
#a=1/0
print ('Errors 1:', errors_1)
print ('Annotating with external CSVs')
#Index external_internals
external_internals = {external['name']:external for external in externals if external['type'] == 'internal'}
for external in externals:
if external['type'] == 'csv':
external_name = external['name']
print (' Name: {}'.format(external_name))
print (' Loading file: {}'.format(external['filename']))
csv = pd.read_csv(external['filename'], **external['read_csv_options'])
csv_dict = csv.to_dict('index')
print (' DONE. Length: {}'.format(len(csv_dict)))
#Take the central table object
all_objects = table_db.objects.all()
print (' Annotating {} main records'.format(all_objects.count()))
o_counter = 0
o_annotated = 0
for o in all_objects:
o_counter += 1
if o_counter % 100 == 0:
print (' {}. Objects: {} Annotated: {}'.format(print_now(), o_counter, o_annotated))
matched = external['matcher'](csv, o) # THIS IS VERY SLOW!!
if matched.empty:
continue
o_annotated += 1
# This is not empty
# Create foreign object
# Create not M2M
				not_m2m = {field['name']:field['l'](matched) for field in external['fields'] if not field['type'] == 'ManyToManyField'}
foreign_object = get_model(external_name)(**not_m2m)
# Save model
foreign_object.save()
# Create M2M objects
m2m = {field['name']: field['l_m2m'](matched) for field in external['fields'] if field['type'] == 'ManyToManyField'}
#print (m2m) # {'Clinical_Significance': [{'Clinical Significance': 'Benign'}]}
m2m_objects = {k: [get_model(k).objects.get_or_create(**x)[0] for x in v] for k,v in m2m.items()}
#print (m2m_objects)
#Connect with foreign_object
for k, v in m2m_objects.items():
getattr(foreign_object, k).set(v)
#Save foreign_object
foreign_object.save()
#Now that we have the foreign_object stored, we can connect it with the foreign key of the main object
setattr(o, external_name, foreign_object) # o.external_name = foreign_object
#Update main object
o.save()
print ('Annotated {} out of {} records'.format(o_annotated, o_counter))
print ('DONE!')
if False: # This is legacy code. To be removed...
for field in schema:
if not field['type'] == 'ManyToManyField':
continue
if instance is None:
instance = index[id_]
values = field['l_multi'](record[field['col_name']])
#Store the values
m2m_db = getattr(models, field['name'])
if not field['name'] in m2m_index:
m2m_index[field['name']] = {}
#Perform as little as possible queries to the database
for value in values:
if not value in m2m_index[field['name']]:
m2m_index[field['name']][value] = m2m_db.objects.get_or_create(**{field['name']:value})[0]
values_obj = [m2m_index[field['name']][value] for value in values]
#Create M2M relationship
getattr(getattr(instance, field['name']+'_multi'), 'set')(values_obj)
instance.save()
	print ('IMPORT ERRORS')
print (json.dumps(import_errors, indent=4))
print ('DONE')
def comma_int(x):
return int(x.replace(',', ''))
def isNone(x):
return None if pd.isnull(x) else x
def splitUnique(field_name, sep, t=str):
'''
t = type
'''
def f(x):
if pd.isnull(x):
return [None]
if not hasattr(x, 'split'):
if t == str:
return [str(x)]
elif t == int:
return [int(x)]
elif t == float:
return [float(x)]
raise ZazzException(f'Invalid type: {type(x).__name__} in field: {field_name}')
return [y if y else None for y in x.split(sep)]
return f
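# Usage sketch (field name and separator are illustrative, not from the original schema):
#
#   split_sift = splitUnique('Sift', '|', t=float)
#   split_sift('0.12|0.34|')    # -> ['0.12', '0.34', None]  (split parts stay strings)
#   split_sift(0.12)            # -> [0.12]                  (a scalar is cast via t)
#   split_sift(float('nan'))    # -> [None]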
def join_set_sep(sep):
def f(x):
if pd.isnull(x):
return None
return sep.join(sorted(list(set(x.split('|')))))
return f
def parse_vcf(fn):
'''
'''
print ('Parsing VCF:', fn)
ret = {}
c=0
with open(fn) as f:
for l in f:
if l[0] == '#':
continue
c += 1
if c%10000 == 0:
print ('VCF LINES READ:', c)
ls = l.strip().split()
chromosome = ls[0].replace('chr', '')
position = int(ls[1])
reference = ls[3]
alternative = ls[4]
genotype = ls[9].split(':')[0]
#print (genotype)
#print (chromosome)
#print (position)
#print (reference)
#print (alternative)
if len(reference) != 1:
continue
if len(alternative) != 1:
continue
if genotype == '0/1':
geno = 'HET'
elif genotype == '1/1':
geno = 'HOM'
else:
print (genotype)
				raise ValueError('Unexpected genotype value: {}'.format(genotype))
ret[(chromosome, position)] = (reference, alternative, geno)
print ('VCF LINES TOTAL:', c)
return ret
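# Usage sketch (file name and coordinates are placeholders): the parser keeps only
# single-nucleotide REF/ALT rows and keys the result on (chromosome, position), with
# the 'chr' prefix stripped from the chromosome name.
#
#   variants = parse_vcf('sample.vcf')
#   variants.get(('17', 7579472))   # e.g. -> ('G', 'C', 'HET') if that call is present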
######### BED ##########
'''
http://genome.ucsc.edu/FAQ/FAQformat#format1
The first three required BED fields are:
chrom - The name of the chromosome (e.g. chr3, chrY, chr2_random) or scaffold (e.g. scaffold10671).
chromStart - The starting position of the feature in the chromosome or scaffold. The first base in a chromosome is numbered 0.
chromEnd - The ending position of the feature in the chromosome or scaffold. The chromEnd base is not included in the display of the feature. For example, the first 100 bases of a chromosome are defined as chromStart=0, chromEnd=100, and span the bases numbered 0-99.
====A====
chr1 5 6 K
chr1 10 20 L
chr1 25 26 M
====B====
chr1 7 9 A AA
chr1 8 10 B BB
chr1 9 12 C CC
chr1 10 11 D DD
chr1 10 20 E EE
chr1 12 14 F FF
chr1 17 25 G GG
chr1 18 20 H HH
a = BedTool('a.bed')
b = BedTool('b.bed')
#print (a.intersect(b, loj=True))
a.intersect(b, loj=True).saveas('c.bed')
chr1 5 6 K . -1 -1 . .
chr1 10 20 L chr1 9 12 C CC
chr1 10 20 L chr1 10 11 D DD
chr1 10 20 L chr1 10 20 E EE
chr1 10 20 L chr1 12 14 F FF
chr1 10 20 L chr1 17 25 G GG
chr1 10 20 L chr1 18 20 H HH
chr1 25 26 M . -1 -1 . .
'''
def bed_create_from_db(querySet, filename):
'''
QuerySet must be ordered, According to position!
'''
print ('Saving DB objects in BED format in: {}'.format(filename))
with open(filename, 'w') as f:
c = 0
for o in querySet:
c += 1
if c % 1000 == 0:
print (' Saved: {} records'.format(c))
record = [
o.Chromosome,
str(o.Position),
str(o.Position+1), ## FIX ME !!!!
str(o.id),
]
f.write('\t'.join(record) + '\n')
print (' Done')
def bed_loj(filename_1, filename_2, output_filename):
'''
https://daler.github.io/pybedtools/autodocs/pybedtools.bedtool.BedTool.intersect.html
'''
print (' Intersecting LOJ with BedTools..')
a = BedTool(filename_1)
b = BedTool(filename_2)
a.intersect(b, loj=True).saveas(output_filename)
print (' DONE')
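# Typical round trip (a sketch; file names are placeholders): dump a position-ordered
# queryset to BED, left-outer-join it against an annotation BED file, then parse the
# resulting output file.
#
#   bed_create_from_db(table_db.objects.all().order_by('Position'), 'main.bed')
#   bed_loj('main.bed', 'annotation.bed', 'main_loj.bed')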
######### END OF BED ###
def chromosome_sizes_hg19():
'''
http://hgdownload.cse.ucsc.edu/goldenPath/hg19/bigZips/hg19.chrom.sizes
'''
return {
'chr1': 249250621,
'chr2': 243199373,
'chr3': 198022430,
'chr4': 191154276,
'chr5': 180915260,
'chr6': 171115067,
'chr7': 159138663,
'chrX': 155270560,
'chr8': 146364022,
'chr9': 141213431,
'chr10': 135534747,
'chr11': 135006516,
'chr12': 133851895,
'chr13': 115169878,
'chr14': 107349540,
'chr15': 102531392,
'chr16': 90354753,
'chr17': 81195210,
'chr18': 78077248,
'chr20': 63025520,
'chrY' : 59373566,
'chr19': 59128983,
'chr22': 51304566,
'chr21': 48129895,
'chrM' : 16571,
}
def list_of_chromosomes():
return list(map(lambda x : 'chr' + x, list(map(str, range(1,23)) ) + ['X', 'Y', 'M']))
def accumulate_chromosome_sizes_hg19():
s = chromosome_sizes_hg19()
m = list_of_chromosomes()
offset = 0
ret = {}
for chromosome in m:
ret[chromosome] = offset
offset += s[chromosome]
return ret
def accumulative_position(chromosome, position):
chr_index = g['list_of_chromosomes'].index(chromosome)
if chr_index == 0:
return int(position)
return g['accumulate_chromosome_sizes_hg19'][g['list_of_chromosomes'][chr_index]] + int(position)
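# Worked example (sizes from chromosome_sizes_hg19; assumes the module-level dict g holds
# 'list_of_chromosomes' and 'accumulate_chromosome_sizes_hg19' as used above):
#
#   accumulative_position('chr1', 1000)  # -> 1000 (chr1 has offset 0)
#   accumulative_position('chr2', 1000)  # -> 249250621 + 1000 = 249251621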
def pandas_to_vcf(df, chromosome_f, position_f, reference_f, alternative_f, vcf_filename):
print ('Converting pandas to VCF')
input_data = df.to_dict('records')
f = open(vcf_filename, 'w')
f.write('##fileformat=VCFv4.0\n')
f.write('\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO']) + '\n')
for line_index, record in enumerate(input_data):
#print (record)
if line_index%10000 == 0:
print ('Lines: {}/{}'.format(line_index, len(input_data)))
chromosome = chromosome_f(record)
if 'chr' in chromosome.lower():
chromosome = chromosome.replace('chr', '')
position = position_f(record)
reference = reference_f(record)
alternative = alternative_f(record)
try:
#savoura = convert_to_base64(json.dumps(record))
savoura = '.'
except TypeError as e:
if str(e) == "Object of type Timestamp is not JSON serializable":
print ('Error:', str(e), ' ignoring..')
continue
else:
raise e
to_print = [chromosome, position, '.', reference, alternative, '.', savoura, '.']
to_print_str = '\t'.join(map(str, to_print)) + '\n'
f.write(to_print_str)
f.close()
print (f'Created file: {vcf_filename}')
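# Usage sketch (the column names in these lambdas are assumptions, not taken from the
# original data):
#
#   pandas_to_vcf(df,
#                 chromosome_f=lambda r: r['Chromosome'],
#                 position_f=lambda r: r['Position'],
#                 reference_f=lambda r: r['Reference'],
#                 alternative_f=lambda r: r['Alternative'],
#                 vcf_filename='variants.vcf')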
def setup_1():
'''
Setups DB + Javascript
'''
#print ('Adding accumulative_position..')
#df['accumulative_position'] = df.apply(lambda x: accumulative_position(*x['# locus'].split(':')), axis=1)
#print (' ..DONE')
def split_location(x):
#print (x)
if pd.isnull(x):
return 'UNKNOWN'
return x.split('|')[0]
def log_f(x):
#print (x)
if str(x)=='0.0':
return None
return int(-np.log10(float(x)))
def allele_coverage(x):
#print (x)
if '.' in x:
sp = x.split('.')
		elif ',' in x:
			sp = x.split(',')
		else:
			sp = [x]
if not len(sp) == 2:
error_message = 'More than 2 values in allele coverage'
import_errors[error_message] += 1
#print (x)
#assert False
return [None, None]
return list(map(int, sp))
def allele_coverage_2(x):
ac = x['allele_coverage']
#print ('Allele Coverage:', ac)
if not ',' in str(ac) and not '.' in str(ac):
int_ac = int(ac)
str_ac = str(ac)
coverage = int(x['coverage'])
for i in range(1,len(str_ac)):
part1 = int(str_ac[:i])
part2 = int(str_ac[i:])
if part1 + part2 == int(coverage):
ret = [part1, part2]
#print (f'Allele Coverage: {ac} Coverage: {coverage} Coverage: {ret}')
return ret
#print (f'Allele Coverage: {ac}')
#print ('Coverage:', coverage)
e = ZazzException('Invalid Coverage value')
e.set_info({'coverage': coverage, 'allele_coverage': ac})
raise e
else:
return allele_coverage(ac)
def maf_f(x):
if type(x).__name__ in ['int', 'float']:
ret = x
else:
ret = float(x.split(':')[0])
return ret
def sift_raw_f(x):
#return ','.join(str(x).split('|'))
return x
def f5000Exomes_AMAF(name):
def f(x):
if pd.isnull(x):
return None
if x.count(':') != 2:
e = ZazzException('Invalid 5000Exomes values')
e.set_info({'value': x})
raise e
values = dict(y.split('=') for y in x.split(':'))
return float(values[name])
return f
def cosmic_multi_f(x):
# print (x)
# print (type(x))
if str(x) == 'nan':
return ['NaN']
if | pd.isnull(x) | pandas.isnull |
"""
This file contains methods to visualize EKG data, clean EKG data and run EKG analyses.
Classes
-------
EKG
Notes
-----
All R peak detections should be manually inspected with EKG.plotpeaks method and
false detections manually removed with rm_peak method. After rpeak examination,
NaN data can be accounted for by removing false IBIs with rm_ibi method.
"""
import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import scipy as sp
import statistics
import biosignalsnotebooks as bsnb
from scipy import interpolate
from numpy import linspace, diff, zeros_like, arange, array
from mne.time_frequency import psd_array_multitaper
from pandas.plotting import register_matplotlib_converters
from scipy.signal import welch
class EKG:
"""
Run EKG analyses including cleaning and visualizing data.
Attributes
----------
metadata : nested dict
File information and analysis information.
Format {str:{str:val}} with val being str, bool, float, int or pd.Timestamp.
data : pd.DataFrame
Raw data of the EKG signal (mV) and the threshold line (mV) at each sampled time point.
rpeak_artifacts : pd.Series
False R peak detections that have been removed.
rpeaks_added : pd.Series
R peak detections that have been added.
ibi_artifacts : pd.Series
Interbeat interval data that has been removed.
rpeaks : pd.Series
Cleaned R peaks data without removed peaks and with added peaks.
rr : np.ndarray
Time between R peaks (ms).
nn : np.ndarray
Cleaned time between R peaks (ms) without removed interbeat interval data.
rpeaks_df : pd.DataFrame
Raw EKG value (mV) and corresponding interbeat interval leading up to the data point (ms) at each sampled point.
"""
def __init__(self, fname, fpath, polarity='positive', min_dur=True, epoched=True, smooth=False, sm_wn=30, mw_size=100, upshift=3.5,
rms_align='right', detect_peaks=True, pan_tompkins=True):
"""
Initialize raw EKG object.
Parameters
----------
fname : str
Filename.
fpath : str
Path to file.
polarity: str, default 'positive'
polarity of the R-peak deflection. Options: 'positive', 'negative'
min_dur : bool, default True
Only load files that are >= 5 minutes long.
epoched : bool, default True
Whether file was epoched using ioeeg.
smooth : bool, default False
Whether raw signal should be smoothed before peak detections. Set True if raw data has consistent high frequency noise
preventing accurate peak detection.
sm_wn : float, default 30
Size of moving window for rms smoothing preprocessing (milliseconds).
mw_size : float, default 100
Moving window size for R peak detection (milliseconds).
upshift : float, default 3.5
Detection threshold upshift for R peak detection (% of signal).
rms_align: str, default 'right'
whether to align the mean to the right or left side of the moving window [options: 'right', 'left']
rm_artifacts : bool, default False
Apply IBI artifact removal algorithm.
detect_peaks : bool, default True
Option to detect R peaks and calculate interbeat intervals.
pan_tompkins : bool, default True
Option to detect R peaks using automatic pan tompkins detection method
Returns
-------
EKG object. Includes R peak detections and calculated inter-beat intervals if detect_peaks is set to True.
"""
# set metadata
filepath = os.path.join(fpath, fname)
if epoched == False:
in_num, start_date, slpstage, cycle = fname.split('_')[:4]
elif epoched == True:
in_num, start_date, slpstage, cycle, epoch = fname.split('_')[:5]
self.metadata = {'file_info':{'in_num': in_num,
'fname': fname,
'path': filepath,
'rpeak_polarity': polarity,
'start_date': start_date,
'sleep_stage': slpstage,
'cycle': cycle
}
}
if epoched == True:
self.metadata['file_info']['epoch'] = epoch
# load the ekg
self.load_ekg(min_dur)
        # flip the polarity if R peak deflections are negative
if polarity == 'negative':
self.data = self.data*-1
if smooth == True:
self.rms_smooth(sm_wn)
else:
self.metadata['analysis_info']['smooth'] = False
# create empty series for false detections removed and missed peaks added
self.rpeak_artifacts = | pd.Series() | pandas.Series |
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
from openpyxl import load_workbook
import pickle
import numpy as np
def summarize_reg(gene_set, n_data_matrix):
"""
	The SUMMARIZE_REG operation summarizes all the data analysis results, collecting them in convenient tables that are exported locally in Excel files.
:param gene_set: the set of genes of interest to summarize
:param n_data_matrix: number identifying the data matrix to summarize (only 2,3 and 5 values are permitted)
Example::
import genereg as gr
gr.SummaryResults.summarize_reg(gene_set='DNA_REPAIR', n_data_matrix=2)
gr.SummaryResults.summarize_reg(gene_set='DNA_REPAIR', n_data_matrix=3)
gr.SummaryResults.summarize_reg(gene_set='DNA_REPAIR', n_data_matrix=5)
"""
# Check input parameters
if n_data_matrix not in [2, 3, 5]:
raise ValueError('Data Matrix ERROR! Possible values: {2,3,5}')
# Define the model to summarize
model = str(n_data_matrix)
# Define the previous model to check
if model == '3':
previous_model = str(int(model)-1)
elif model == '5':
previous_model = str(int(model)-2)
# Import the dictionary of genes of interest with their candidate regulatory genes
dict_RegulGenes = pickle.load(open('./2_Regulatory_Genes/dict_RegulGenes.p', 'rb'))
# Import the list of genes of interest and extract in a list the Gene Symbols of all the genes belonging to the current gene set
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
SYMs_current_pathway = []
for index, row in EntrezConversion_df.iterrows():
sym = row['GENE_SYMBOL']
path = row['GENE_SET']
if path == gene_set:
SYMs_current_pathway.append(sym)
if (model == '3') or (model == '5'):
# Create a list containing the Gene Symbols of the regulatory genes of the genes in the current gene set
current_regulatory_genes = []
for key, value in dict_RegulGenes.items():
if key in SYMs_current_pathway:
for gene in value:
if gene not in current_regulatory_genes:
current_regulatory_genes.append(gene)
if (model == '5'):
# Create a list containing the Gene Symbols of genes in the other gene sets
SYMs_other_pathways = []
for index, row in EntrezConversion_df.iterrows():
sym = row['GENE_SYMBOL']
path = row['GENE_SET']
if not (path == gene_set):
SYMs_other_pathways.append(sym)
# Create a list containing the Gene Symbols of the regulatory genes of the genes in the other gene sets
regulatory_genes_other = []
for key, value in dict_RegulGenes.items():
if key not in SYMs_current_pathway:
for gene in value:
if gene not in regulatory_genes_other:
regulatory_genes_other.append(gene)
# Create a dataframe to store final summary results of feature selection and linear regression for each gene of interest
if model == '2':
lr_summary_df = pd.DataFrame(index=SYMs_current_pathway, columns=['Inital N° Features','Discarded Features','N° Features Selected','R2','Adj.R2'])
else:
lr_summary_df = pd.DataFrame(index=SYMs_current_pathway, columns=['Inital N° Features','N° New Features w.r.t. Previous Model','Discarded Features','Features Available for Selection','N° Features Selected','R2','Adj.R2'])
for current_gene in SYMs_current_pathway:
# Import the current and, if present, the previous model of the current gene
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == current_gene, 'ENTREZ_GENE_ID'].iloc[0]
model_gene_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+model+'.xlsx',sheetname='Sheet1',header=0)
if not (model == '2'):
previous_model_df = pd.read_excel('./4_Data_Matrix_Construction/Model'+previous_model+'/Gene_'+gene_ID+'_['+current_gene+']'+'_('+gene_set+')-Model_v'+previous_model+'.xlsx',sheetname='Sheet1',header=0)
# Extract the list of new features, added to the current model, w.r.t. the previous one
if not (model == '2'):
current_model_col_names = set(list(model_gene_df.columns.values))
previous_model_col_names = set(list(previous_model_df.columns.values))
new_features = list(current_model_col_names - previous_model_col_names)
lr_summary_df.set_value(current_gene,'N° New Features w.r.t. Previous Model',len(new_features))
# Import the feature selection and linear regression summary tables
feature_sel_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/FeatureSelection/M'+model+'/Feature_Selection_SUMMARY.xlsx',sheetname='Sheet1',header=0)
lin_reg_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/LinearRegression/M'+model+'/Linear_Regression_R2_SCORES.xlsx',sheetname='Sheet1',header=0)
# Extract and store the results in the summary dataframe
n_features = feature_sel_df.get_value(current_gene,'TOT Inital N° Features')
n_feat_discarded = feature_sel_df.get_value(current_gene,'Discarded Features')
if not (model == '2'):
n_features_available = feature_sel_df.get_value(current_gene,'Features Available for Selection')
n_feat_selected = feature_sel_df.get_value(current_gene,'N° Features Selected')
lin_reg_r2_adj = lin_reg_df.get_value(current_gene,'Adj.R2')
lin_reg_r2 = lin_reg_df.get_value(current_gene,'R2')
lr_summary_df.set_value(current_gene,'Inital N° Features',n_features)
lr_summary_df.set_value(current_gene,'Discarded Features',n_feat_discarded)
if not (model == '2'):
lr_summary_df.set_value(current_gene,'Features Available for Selection',n_features_available)
lr_summary_df.set_value(current_gene,'N° Features Selected',n_feat_selected)
lr_summary_df.set_value(current_gene,'Adj.R2',lin_reg_r2_adj)
lr_summary_df.set_value(current_gene,'R2',lin_reg_r2)
# Export the summary dataframe in an Excel file
lr_summary_df = lr_summary_df.sort_values(by=['Adj.R2'], ascending=[False])
filename = './5_Data_Analysis/'+gene_set+'/Feature_Selection_and_Linear_Regression.xlsx'
writer = ExcelWriter(filename,engine='openpyxl')
try:
writer.book = load_workbook(filename)
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
except IOError:
# if the file does not exist yet, I will create it
pass
lr_summary_df.to_excel(writer,'M'+model)
writer.save()
	# Extract the relevant features for each gene of the current gene set, store them in a summary table, and define a dataframe summarizing the features selected for each model gene
features_summary_df = pd.DataFrame(index=SYMs_current_pathway)
for current_gene in SYMs_current_pathway:
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == current_gene, 'ENTREZ_GENE_ID'].iloc[0]
# Import the regression coefficients
coeff_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/LinearRegression/M'+model+'/Coefficients/Coefficients_(M'+model+')-Gene_'+gene_ID+'_['+current_gene+'].xlsx',sheetname='Sheet1',header=0)
# Import the confidence intervals
ci_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/LinearRegression/M'+model+'/ConfidenceIntervals/Confidence_Intervals_(M'+model+')-Gene_'+gene_ID+'_['+current_gene+'].xlsx',sheetname='Sheet1',header=0)
# Import the correlation matrix
corr_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/LinearRegression/M'+model+'/CorrelationMatrix/Correlation_Matrix_(M'+model+')-Gene_'+gene_ID+'_['+current_gene+'].xlsx',sheetname='Sheet1',header=0)
# Select the relevant features on the basis of the confidence intervals (i.e. if the confidence interval does not contain 0, then the feature is significant for the model)
relevant_features = []
for index, row in ci_df.iterrows():
s = row['Significant Feature?']
if s == 'YES':
relevant_features.append(index)
# Create a dataframe to store the results and fill it with requested information
relevant_features_df = pd.DataFrame(index=relevant_features, columns=['Regression Coefficient','Feature Description','Correlation with EXPRESSION ('+current_gene+')'])
for index, row in coeff_df.iterrows():
gene = row['feature']
if gene in relevant_features:
coeff = row['coefficient']
relevant_features_df.set_value(gene,'Regression Coefficient',coeff)
for index, row in corr_df.iterrows():
if index in relevant_features:
corr_with_target = row['EXPRESSION ('+current_gene+')']
relevant_features_df.set_value(index,'Correlation with EXPRESSION ('+current_gene+')',corr_with_target)
# Add the features descriptions
if model == '2':
for f in relevant_features:
if f in SYMs_current_pathway:
descr = 'Gene of the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif 'METHYLATION' in f:
descr = 'Methylation of the model gene ['+current_gene+'] in the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif f in dict_RegulGenes[current_gene]:
descr = 'Candidate regulatory gene of the model gene ['+current_gene+'] of the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif model == '3':
for f in relevant_features:
if f in SYMs_current_pathway:
descr = 'Gene of the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif 'METHYLATION' in f:
descr = 'Methylation of the model gene ['+current_gene+'] in the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif f in dict_RegulGenes[current_gene]:
descr = 'Candidate regulatory gene of the model gene ['+current_gene+'] of the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif not(f in dict_RegulGenes[current_gene]) and (f in current_regulatory_genes):
descr = 'Candidate regulatory gene of the genes in the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif model == '5':
for f in relevant_features:
if f in SYMs_current_pathway:
descr = 'Gene of the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif 'METHYLATION' in f:
descr = 'Methylation of the model gene ['+current_gene+'] in the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif f in dict_RegulGenes[current_gene]:
descr = 'Candidate regulatory gene of the model gene ['+current_gene+'] of the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif not(f in dict_RegulGenes[current_gene]) and (f in current_regulatory_genes):
descr = 'Candidate regulatory gene of the genes in the '+gene_set+' set'
relevant_features_df.set_value(f,'Feature Description',descr)
elif f in SYMs_other_pathways:
df_temp = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == f].copy()
f_pathways = (df_temp.GENE_SET.unique()).tolist()
descr = 'Gene of the gene sets: '+(', '.join(f_pathways))
relevant_features_df.set_value(f,'Feature Description',descr)
elif f in regulatory_genes_other:
regulated_genes_other = []
for key, value in dict_RegulGenes.items():
if key in SYMs_other_pathways:
if f in value:
regulated_genes_other.append(key)
df_temp = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'].isin(regulated_genes_other)].copy()
f_pathways = (df_temp.GENE_SET.unique()).tolist()
descr = 'Candidate regulatory gene of the gene sets: '+(', '.join(f_pathways))
relevant_features_df.set_value(f,'Feature Description',descr)
# Export the dataframe in an Excel file
relevant_features_df = relevant_features_df.sort_values(by=['Regression Coefficient'], ascending=[False])
filename = './5_Data_Analysis/'+gene_set+'/Relevant_Features-Gene_'+gene_ID+'_['+current_gene+'].xlsx'
writer = ExcelWriter(filename,engine='openpyxl')
try:
writer.book = load_workbook(filename)
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
except IOError:
# if the file does not exist yet, I will create it
pass
relevant_features_df.to_excel(writer,'M'+model)
writer.save()
relevance_order = 0
for index, row in relevant_features_df.iterrows():
relevance_order = relevance_order + 1
str_order = str(relevance_order)
features_summary_df.set_value(current_gene, index, str_order)
# Export the summary dataframe in an Excel file
filename = './5_Data_Analysis/'+gene_set+'/Order_of_Features_Selected.xlsx'
writer = ExcelWriter(filename,engine='openpyxl')
try:
writer.book = load_workbook(filename)
writer.sheets = dict((ws.title, ws) for ws in writer.book.worksheets)
except IOError:
# if the file does not exist yet, I will create it
pass
features_summary_df.to_excel(writer,'M'+model)
writer.save()
def summarize_r2(gene_set):
"""
The SUMMARIZE_R2 operation summarizes R2 and Adjusted R2 scores for each target gene in each regression model, storing them locally in a single Excel file.
:param gene_set: the set of genes of interest to summarize
Example::
import genereg as gr
gr.SummaryResults.summarize_r2(gene_set='DNA_REPAIR')
"""
# Define the models to summarize
models = ['2','3','5']
# Import the list of genes of interest and extract in a list the Gene Symbols of all the genes belonging to the current gene set
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
SYMs_current_pathway = []
for index, row in EntrezConversion_df.iterrows():
sym = row['GENE_SYMBOL']
path = row['GENE_SET']
if path == gene_set:
SYMs_current_pathway.append(sym)
# Create a dataframe to store the final summary about features selected and R2 scores, for each gene of interest
summary_df = pd.DataFrame(index=SYMs_current_pathway, columns=['Selected Features (M2)','R2 (M2)','Adj.R2 (M2)','Selected Features (M3)','R2 (M3)','Adj.R2 (M3)','Selected Features (M5)','R2 (M5)','Adj.R2 (M5)'])
for m in models:
# Import the summary table for the current model
current_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/Feature_Selection_and_Linear_Regression.xlsx',sheetname='M'+m,header=0)
# Extract the useful information and store it the summary dataframe
for index, row in current_df.iterrows():
n_features = row['N° Features Selected']
adj_r2 = row['Adj.R2']
r2 = row['R2']
summary_df.set_value(index,'Selected Features (M'+m+')',n_features)
summary_df.set_value(index,'Adj.R2 (M'+m+')',adj_r2)
summary_df.set_value(index,'R2 (M'+m+')',r2)
# Export the summary dataframe in an Excel file
summary_df = summary_df.sort_values(by=['Adj.R2 (M5)'], ascending=[False])
writer = ExcelWriter('./5_Data_Analysis/'+gene_set+'/R2_and_Adj.R2_Scores.xlsx',engine='openpyxl')
summary_df.to_excel(writer,'Sheet1')
writer.save()
def best_genes(gene_set):
"""
The BEST_GENES operation collects the target genes with the best linear fit (Adjusted R2 >= 0.6) in the three regression models, storing them locally in a single Excel file.
:param gene_set: the set of genes of interest to summarize
Example::
import genereg as gr
gr.SummaryResults.best_genes(gene_set='DNA_REPAIR')
"""
# Define the models to summarize
models = ['2','3','5']
# Import the list of genes of interest and extract in a list the Gene Symbols of all the genes belonging to the current gene set
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
SYMs_current_pathway = []
for index, row in EntrezConversion_df.iterrows():
sym = row['GENE_SYMBOL']
path = row['GENE_SET']
if path == gene_set:
SYMs_current_pathway.append(sym)
for model in models:
# Import the summary table cointaining the value of the R2 for each model and for each gene of interest in the current gene set
# and extract the list of "good" genes, the ones that have R2 >= 0.6 in the current model
summary_r2_df = pd.read_excel('./5_Data_Analysis/'+gene_set+'/R2_and_Adj.R2_Scores.xlsx',sheetname='Sheet1',header=0)
summary_r2_df = summary_r2_df.sort_values(by=['Adj.R2 (M'+model+')'], ascending=[False])
good_genes = []
for index, row in summary_r2_df.iterrows():
current_model_r2 = row['Adj.R2 (M'+model+')']
if current_model_r2 >= 0.6:
good_genes.append(index)
# Create a dataframe to the store the results, indexed by the "good" genes and a progressive number for each significant feature extracted during the regression process
num_features = []
for i in list(range(1,20)):
num_features.append(i)
# Cartesian product to generate tuples for multi-indexing
import itertools
tuples = []
for i in itertools.product(good_genes,num_features):
tuples.append(i)
# Set the multiple indexes to be used in the dataframe
index = pd.MultiIndex.from_tuples(tuples, names=['GENE', '#'])
# Create the dataframe and initialize the empty cells as empty strings
final_summary_df = pd.DataFrame('', index = index, columns = ['Significant Feature','Adj.R2','Regression Coefficient','Feature Description'])
# Fill the dictionary
# Adjusted R2
for current_gene in good_genes:
r2 = summary_r2_df.get_value(current_gene,'Adj.R2 (M'+model+')')
final_summary_df.loc[(current_gene, 1),'Adj.R2'] = r2
# Features
for current_gene in good_genes:
# Import the table containing the significant features extracted for the current gene
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == current_gene, 'ENTREZ_GENE_ID'].iloc[0]
features_df = | pd.read_excel('./5_Data_Analysis/'+gene_set+'/Relevant_Features-Gene_'+gene_ID+'_['+current_gene+'].xlsx',sheetname='M'+model,header=0) | pandas.read_excel |
# Bamadrew95's stat compiler. Uses Beautiful Soup and pandas to grab stats from the web, then compiles and sorts them by team.
# Based on Bamaham93's FBS Scraper program
######################################################################################################
# This will be used to store static html for a single teams page so that I don't have to keep using get requests while developing
testingSourceCode = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>cfbstats.com - 2009 Teams</title>
<link rel="stylesheet" type="text/css" href="/css/cfbstats.css">
<style type="text/css">
<!--
#wrapper {
background: none;
}
#content {
width: 100%;
}
div.conferences {
width: 90%;
margin: 20px auto;
}
div.conference {
float: left;
width: 23.5%;
margin-left: 1%;
}
div.conference h1 {
text-align: center;
font-size: 95%;
color: white;
font-weight: bold;
background-color: #616161;
padding: 2px 0px;
border-bottom-color: #616161;
border-bottom-style: solid;
border-bottom-width: 1px;
}
div.conference li {
padding-left: 5px;
padding-top: 2px;
padding-bottom: 3px;
}
div.conference ul {
list-style: none;
width: 100%;
}
-->
</style>
<!-- START Drew's Adds -->
<script type="text/javascript" src="http://coachesbythenumbers.com/wp-content/custom-php/jquery/jquery.min.js?version=133"></script>
<script type="text/javascript" src="http://coachesbythenumbers.com/wp-content/custom-php/cfbstats.js"></script>
<link rel="image_src" type="image/jpeg" href="http://coachesbythenumbers.com/wp-content/custom-php/images/socialLogo.png" />
<link rel="stylesheet" href="http://coachesbythenumbers.com/wp-content/custom-php/bootstrap/css/bootstrap.min.css?version=1">
<link rel="stylesheet" href="http://coachesbythenumbers.com/wp-content/custom-php/bootstrap/css/bootstrap-theme.min.css?version=1">
<link rel="stylesheet" href="http://coachesbythenumbers.com/wp-content/custom-php/cfbstats.css">
<!-- Start DFP SETUP - Header Tags -->
<script type='text/javascript'>
var gptadslots=[], googletag = googletag || {}; googletag.cmd = googletag.cmd || [];
(function(){ var gads = document.createElement('script');
gads.async = true; gads.type = 'text/javascript';
var useSSL = 'https:' == document.location.protocol;
gads.src = (useSSL ? 'https:' : 'http:') + '//www.googletagservices.com/tag/js/gpt.js';
var node = document.getElementsByTagName('script')[0];
node.parentNode.insertBefore(gads, node);
})();
</script>
<script type='text/javascript' src='https://img.bnqt.com/lib/js/sdpdfphelper.js'></script>
<script type='text/javascript'>
googletag.cmd.push(function() {
googletag.pubads().enableAsyncRendering(); googletag.pubads().enableSingleRequest();
googletag.pubads().setTargeting('title', sdpTargeting.title)
.setTargeting('targetPaths', sdpTargeting.targetPaths)
.setTargeting('fullPath', sdpTargeting.fullPath)
.setTargeting('queryStr', sdpTargeting.queryStr)
.setTargeting('domainName', sdpTargeting.domainName);
});
</script>
<!-- DFP SETUP end -->
<!-- END Drew's Adds -->
</head>
<body>
<div id="wrapper">
<div id="breadcrumb">
<span class="label">You are here:</span>
<a href="/">Home</a>
<span class="separator">></span>
<span class="selected">2009 Teams</span>
</div> <!-- breadcrumb -->
<!-- New navbar, added by Drew -->
<nav class="navbar navbar-default navbar-static-top navbar-inverse navbar-thin" role="navigation" style="margin-bottom:0px;">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<img src="http://coachesbythenumbers.com/wp-content/custom-php/images/sportsourcenav.png" width="210" style="border:0;margin-top:13px"/>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav navbar-nav-thin navbar-right">
<li><a href="mailto:<EMAIL>">Advertise?</a></li>
<li><a href="mailto:<EMAIL>">Contact Us</a></li>
<li><a href="https://twitter.com/SportSourceA"><img src="http://coachesbythenumbers.com/wp-content/custom-php/images/twitter_icon_24.png" width="14" style="margin-bottom:5px"/> @SportSourceA</a></li>
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
<div id="globalHeader">
<a id="imagemap" href="/"></a>
<ul id="globalNav">
<li><a href="/">Home</a></li>
<li><a href="/2020/national/index.html">National</a></li>
<li><a href="/2020/conference/index.html">Conferences</a></li>
<li><a href="/2020/team/index.html">Teams</a></li>
<li><a href="/2020/player/index.html">Players</a></li>
</ul>
</div> <!-- globalHeader -->
<div id="content">
<h1 id="pageTitle">2009 Teams</h1>
<div id="seasons">
<ul>
<li><a href="/2020/team/index.html">2020</a></li>
<li><a href="/2019/team/index.html">2019</a></li>
<li><a href="/2018/team/index.html">2018</a></li>
<li><a href="/2017/team/index.html">2017</a></li>
<li><a href="/2016/team/index.html">2016</a></li>
<li><a href="/2015/team/index.html">2015</a></li>
<li><a href="/2014/team/index.html">2014</a></li>
<li><a href="/2013/team/index.html">2013</a></li>
<li><a href="/2012/team/index.html">2012</a></li>
<li><a href="/2011/team/index.html">2011</a></li>
<li><a href="/2010/team/index.html">2010</a></li>
<li class="selected">2009</li>
</ul>
</div>
<div class="conferences">
<div class="conference">
<h1>Atlantic Coast Conference</h1>
<ul>
<li><a href="/2009/team/67/index.html">Boston College</a></li>
<li class="even-row"><a href="/2009/team/147/index.html">Clemson</a></li>
<li><a href="/2009/team/193/index.html">Duke</a></li>
<li class="even-row"><a href="/2009/team/234/index.html">Florida State</a></li>
<li><a href="/2009/team/255/index.html">Georgia Tech</a></li>
<li class="even-row"><a href="/2009/team/392/index.html">Maryland</a></li>
<li><a href="/2009/team/415/index.html">Miami (Florida)</a></li>
<li class="even-row"><a href="/2009/team/457/index.html">North Carolina</a></li>
<li><a href="/2009/team/490/index.html">North Carolina State</a></li>
<li class="even-row"><a href="/2009/team/746/index.html">Virginia</a></li>
<li><a href="/2009/team/742/index.html">Virginia Tech</a></li>
<li class="even-row"><a href="/2009/team/749/index.html">Wake Forest</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Big 12 Conference</h1>
<ul>
<li><a href="/2009/team/51/index.html">Baylor</a></li>
<li class="even-row"><a href="/2009/team/157/index.html">Colorado</a></li>
<li><a href="/2009/team/311/index.html">Iowa State</a></li>
<li class="even-row"><a href="/2009/team/328/index.html">Kansas</a></li>
<li><a href="/2009/team/327/index.html">Kansas State</a></li>
<li class="even-row"><a href="/2009/team/434/index.html">Missouri</a></li>
<li><a href="/2009/team/463/index.html">Nebraska</a></li>
<li class="even-row"><a href="/2009/team/522/index.html">Oklahoma</a></li>
<li><a href="/2009/team/521/index.html">Oklahoma State</a></li>
<li class="even-row"><a href="/2009/team/703/index.html">Texas</a></li>
<li><a href="/2009/team/697/index.html">Texas A&M</a></li>
<li class="even-row"><a href="/2009/team/700/index.html">Texas Tech</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Big East Conference</h1>
<ul>
<li><a href="/2009/team/140/index.html">Cincinnati</a></li>
<li class="even-row"><a href="/2009/team/164/index.html">Connecticut</a></li>
<li><a href="/2009/team/367/index.html">Louisville</a></li>
<li class="even-row"><a href="/2009/team/545/index.html">Pittsburgh</a></li>
<li><a href="/2009/team/587/index.html">Rutgers</a></li>
<li class="even-row"><a href="/2009/team/651/index.html">South Florida</a></li>
<li><a href="/2009/team/688/index.html">Syracuse</a></li>
<li class="even-row"><a href="/2009/team/768/index.html">West Virginia</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Big Ten Conference</h1>
<ul>
<li><a href="/2009/team/301/index.html">Illinois</a></li>
<li class="even-row"><a href="/2009/team/306/index.html">Indiana</a></li>
<li><a href="/2009/team/312/index.html">Iowa</a></li>
<li class="even-row"><a href="/2009/team/418/index.html">Michigan</a></li>
<li><a href="/2009/team/416/index.html">Michigan State</a></li>
<li class="even-row"><a href="/2009/team/428/index.html">Minnesota</a></li>
<li><a href="/2009/team/509/index.html">Northwestern</a></li>
<li class="even-row"><a href="/2009/team/518/index.html">Ohio State</a></li>
<li><a href="/2009/team/539/index.html">Penn State</a></li>
<li class="even-row"><a href="/2009/team/559/index.html">Purdue</a></li>
<li><a href="/2009/team/796/index.html">Wisconsin</a></li>
</ul>
</div> <!-- conference -->
<div style="clear:both;"></div>
</div> <!-- conferences -->
<div class="conferences">
<div class="conference">
<h1>Conference USA</h1>
<ul>
<li><a href="/2009/team/196/index.html">East Carolina</a></li>
<li class="even-row"><a href="/2009/team/288/index.html">Houston</a></li>
<li><a href="/2009/team/388/index.html">Marshall</a></li>
<li class="even-row"><a href="/2009/team/404/index.html">Memphis</a></li>
<li><a href="/2009/team/574/index.html">Rice</a></li>
<li class="even-row"><a href="/2009/team/663/index.html">SMU</a></li>
<li><a href="/2009/team/664/index.html">Southern Mississippi</a></li>
<li class="even-row"><a href="/2009/team/718/index.html">Tulane</a></li>
<li><a href="/2009/team/719/index.html">Tulsa</a></li>
<li class="even-row"><a href="/2009/team/9/index.html">UAB</a></li>
<li><a href="/2009/team/128/index.html">UCF</a></li>
<li class="even-row"><a href="/2009/team/704/index.html">UTEP</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Independent</h1>
<ul>
<li><a href="/2009/team/725/index.html">Army</a></li>
<li class="even-row"><a href="/2009/team/726/index.html">Navy</a></li>
<li><a href="/2009/team/513/index.html">Notre Dame</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Mid-American Conference</h1>
<ul>
<li><a href="/2009/team/5/index.html">Akron</a></li>
<li class="even-row"><a href="/2009/team/47/index.html">Ball State</a></li>
<li><a href="/2009/team/71/index.html">Bowling Green</a></li>
<li class="even-row"><a href="/2009/team/86/index.html">Buffalo</a></li>
<li><a href="/2009/team/129/index.html">Central Michigan</a></li>
<li class="even-row"><a href="/2009/team/204/index.html">Eastern Michigan</a></li>
<li><a href="/2009/team/331/index.html">Kent State</a></li>
<li class="even-row"><a href="/2009/team/414/index.html">Miami (Ohio)</a></li>
<li><a href="/2009/team/503/index.html">Northern Illinois</a></li>
<li class="even-row"><a href="/2009/team/519/index.html">Ohio</a></li>
<li><a href="/2009/team/690/index.html">Temple</a></li>
<li class="even-row"><a href="/2009/team/709/index.html">Toledo</a></li>
<li><a href="/2009/team/774/index.html">Western Michigan</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Mountain West Conference</h1>
<ul>
<li><a href="/2009/team/721/index.html">Air Force</a></li>
<li class="even-row"><a href="/2009/team/77/index.html">BYU</a></li>
<li><a href="/2009/team/156/index.html">Colorado State</a></li>
<li class="even-row"><a href="/2009/team/473/index.html">New Mexico</a></li>
<li><a href="/2009/team/626/index.html">San Diego State</a></li>
<li class="even-row"><a href="/2009/team/698/index.html">TCU</a></li>
<li><a href="/2009/team/465/index.html">UNLV</a></li>
<li class="even-row"><a href="/2009/team/732/index.html">Utah</a></li>
<li><a href="/2009/team/811/index.html">Wyoming</a></li>
</ul>
</div> <!-- conference -->
<div style="clear:both;"></div>
</div> <!-- conferences -->
<div class="conferences">
<div class="conference">
<h1>Pacific-10 Conference</h1>
<ul>
<li><a href="/2009/team/29/index.html">Arizona</a></li>
<li class="even-row"><a href="/2009/team/28/index.html">Arizona State</a></li>
<li><a href="/2009/team/107/index.html">California</a></li>
<li class="even-row"><a href="/2009/team/529/index.html">Oregon</a></li>
<li><a href="/2009/team/528/index.html">Oregon State</a></li>
<li class="even-row"><a href="/2009/team/657/index.html">USC</a></li>
<li><a href="/2009/team/674/index.html">Stanford</a></li>
<li class="even-row"><a href="/2009/team/110/index.html">UCLA</a></li>
<li><a href="/2009/team/756/index.html">Washington</a></li>
<li class="even-row"><a href="/2009/team/754/index.html">Washington State</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Southeastern Conference</h1>
<ul>
<li><a href="/2009/team/8/index.html">Alabama</a></li>
<li class="even-row"><a href="/2009/team/31/index.html">Arkansas</a></li>
<li><a href="/2009/team/37/index.html">Auburn</a></li>
<li class="even-row"><a href="/2009/team/235/index.html">Florida</a></li>
<li><a href="/2009/team/257/index.html">Georgia</a></li>
<li class="even-row"><a href="/2009/team/334/index.html">Kentucky</a></li>
<li><a href="/2009/team/365/index.html">LSU</a></li>
<li class="even-row"><a href="/2009/team/433/index.html">Mississippi</a></li>
<li><a href="/2009/team/430/index.html">Mississippi State</a></li>
<li class="even-row"><a href="/2009/team/648/index.html">South Carolina</a></li>
<li><a href="/2009/team/694/index.html">Tennessee</a></li>
<li class="even-row"><a href="/2009/team/736/index.html">Vanderbilt</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Sun Belt Conference</h1>
<ul>
<li><a href="/2009/team/30/index.html">Arkansas State</a></li>
<li class="even-row"><a href="/2009/team/229/index.html">Florida Atlantic</a></li>
<li><a href="/2009/team/231/index.html">Florida International</a></li>
<li class="even-row"><a href="/2009/team/671/index.html">Louisiana-Lafayette</a></li>
<li><a href="/2009/team/498/index.html">Louisiana-Monroe</a></li>
<li class="even-row"><a href="/2009/team/419/index.html">Middle Tennessee</a></li>
<li><a href="/2009/team/497/index.html">North Texas</a></li>
<li class="even-row"><a href="/2009/team/716/index.html">Troy</a></li>
<li><a href="/2009/team/772/index.html">Western Kentucky</a></li>
</ul>
</div> <!-- conference -->
<div class="conference">
<h1>Western Athletic Conference</h1>
<ul>
<li><a href="/2009/team/66/index.html">Boise State</a></li>
<li class="even-row"><a href="/2009/team/96/index.html">Fresno State</a></li>
<li><a href="/2009/team/277/index.html">Hawai'i</a></li>
<li class="even-row"><a href="/2009/team/295/index.html">Idaho</a></li>
<li><a href="/2009/team/366/index.html">Louisiana Tech</a></li>
<li class="even-row"><a href="/2009/team/466/index.html">Nevada</a></li>
<li><a href="/2009/team/472/index.html">New Mexico State</a></li>
<li class="even-row"><a href="/2009/team/630/index.html">San Jose State</a></li>
<li><a href="/2009/team/731/index.html">Utah State</a></li>
</ul>
</div> <!-- conference -->
<div style="clear:both;"></div>
</div> <!-- conferences -->
<div style="clear:both;"></div>
<div id="footer">
<p>Copyright © 2006-2020 www.cfbstats.com All rights reserved.</p>
<p><a href="/?page_id=4">Terms of Use</a></p>
<p id="timestamp">12/13/2020 20:58:21</p>
<!-- SMG_CFBStats/300x250_1a/sports/general -->
<div id="usmg_ad__general_sports_300x250_1a">
<script type='text/javascript'>
googletag.defineSlot('/7103/SMG_CFBStats/300x250_1a/sports/general', [[300,250],[300,600]], 'usmg_ad__general_sports_300x250_1a').addService(googletag.pubads());
googletag.enableServices();
googletag.display('usmg_ad__general_sports_300x250_1a');
</script>
</div>
</div> <!-- footer -->
</div> <!-- content -->
</div> <!-- wrapper -->
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-10344841-1");
pageTracker._trackPageview();
} catch(err) {}</script>
<!-- Start of ADS -->
<div id="usmg_ad__main_sports_skin">
<script type='text/javascript'>
//googletag.defineSlot('/7103/SMG_CFBStats/skin/sports/main', [1,1], 'usmg_ad__main_sports_skin').addService(googletag.pubads());
//googletag.enableServices();
//googletag.display('usmg_ad__main_sports_skin');
</script>
</div>
<!-- SMG_CFBStats/skin/sports/general -->
<div id="usmg_ad__general_sports_skin">
<script type='text/javascript'>
googletag.defineSlot('/7103/SMG_CFBStats/skin/sports/general', [1,1], 'usmg_ad__general_sports_skin').addService(googletag.pubads());
googletag.enableServices();
googletag.display('usmg_ad__general_sports_skin');
</script>
</div>
<br/><br/><br/><br/>
<!-- End of ADS -->
</body>
</html>"""
######################################################################################################
# import all needed modules
import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
# The year below controls what year of stats the program will grab
year = "2009"
team_index_url = "http://cfbstats.com/" + year + "/team/index.html"
# Will be used to create team objects which will have attributes such as name, url, conference, record, and an attribute for each stat as well as an object for each game played.
class Teams:
pass
# Below begins code to scrape a given url for html content
# Below is commented out while I use static html to develop
# class Scraper:
# def soup_recipe(address):
# url = address
# page = requests.get(url)
# soup = BeautifulSoup(page.content, "html.parser")
# Scraper.page_status(page)
# return soup
# def page_status(results):
# try:
# results.status_code # results is the Response object returned by requests.get()
# if results.status_code == 200:
# print("Connected")
# pass
# if results.status_code != 200:
# print("Page Connection Error")
# except:
# print("An error occured while attempting to connect to the website.")
# END Scraper Code
# BEGIN gathers urls and team names for each team from team index page
# These are the attributes I'm building so that I can use these later to assign them to team object attributes
teams_by_conf = []
conf_names = []
team_names = []
team_urls = []
# Uncomment the below line of code to GET actual web results
# team_index_soup = Scraper.soup_recipe(team_index_url)
team_index_soup = BeautifulSoup(testingSourceCode, "html.parser")
# delete the above line of code once fully developed
# Delay between GET requests; the time.sleep() argument is in seconds.
time.sleep(0.25)
# creates a list of each conference div
conferences = team_index_soup.find_all(class_="conference")
# Adds conference names to a list
for conf in conferences:
conf_names.append(conf.h1.string)
# Finds all a tags within conferences and compiles them into a list of lists by conference
for conf in conferences:
teams_by_conf.append(conf.find_all("a"))
# loop through each conference and then loops through each team and appends name and url to separate lists
for conf_teams in teams_by_conf:
for team in conf_teams:
team_names.append(team.string)
team_urls.append(team.get("href"))
# print(team_names)
# print(team_urls)
# print(conf_names)
##############################################################################################################
# Source code for a team page to test without get requests
##############################################################################################################
test_team_page = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<title>cfbstats.com - 2009 Boston College Eagles</title>
<link rel="stylesheet" type="text/css" href="/css/cfbstats.css">
<link rel="stylesheet" type="text/css" href="/css/leader.css">
<style type="text/css">
<!--
table.team-statistics {width: 80%;}
table.team-statistics th.statistic-name {width: 50%;}
table.team-statistics th.team-stat {width: 25%;}
table.team-statistics th.opponent-stat {width: 25%;}
table.team-schedule {width: 60%;}
table.team-schedule th.date {width: 11%;}
table.team-schedule th.opponent {width: 34%;}
table.team-schedule th.result {width: 15%;}
table.team-schedule th.game-time {width: 20%;}
table.team-schedule th.attendance {width: 20%;}
table.team-record {width: 30%;}
table.team-record th.split-name {width: 50%;}
table.team-record th.w-l-record {width: 50%;}
-->
</style>
<!-- START Drew's Adds -->
<script type="text/javascript" src="http://coachesbythenumbers.com/wp-content/custom-php/jquery/jquery.min.js?version=133"></script>
<script type="text/javascript" src="http://coachesbythenumbers.com/wp-content/custom-php/cfbstats.js"></script>
<link rel="image_src" type="image/jpeg" href="http://coachesbythenumbers.com/wp-content/custom-php/images/socialLogo.png" />
<link rel="stylesheet" href="http://coachesbythenumbers.com/wp-content/custom-php/bootstrap/css/bootstrap.min.css?version=1">
<link rel="stylesheet" href="http://coachesbythenumbers.com/wp-content/custom-php/bootstrap/css/bootstrap-theme.min.css?version=1">
<link rel="stylesheet" href="http://coachesbythenumbers.com/wp-content/custom-php/cfbstats.css">
<!-- Start DFP SETUP - Header Tags -->
<script type='text/javascript'>
var gptadslots=[], googletag = googletag || {}; googletag.cmd = googletag.cmd || [];
(function(){ var gads = document.createElement('script');
gads.async = true; gads.type = 'text/javascript';
var useSSL = 'https:' == document.location.protocol;
gads.src = (useSSL ? 'https:' : 'http:') + '//www.googletagservices.com/tag/js/gpt.js';
var node = document.getElementsByTagName('script')[0];
node.parentNode.insertBefore(gads, node);
})();
</script>
<script type='text/javascript' src='https://img.bnqt.com/lib/js/sdpdfphelper.js'></script>
<script type='text/javascript'>
googletag.cmd.push(function() {
googletag.pubads().enableAsyncRendering(); googletag.pubads().enableSingleRequest();
googletag.pubads().setTargeting('title', sdpTargeting.title)
.setTargeting('targetPaths', sdpTargeting.targetPaths)
.setTargeting('fullPath', sdpTargeting.fullPath)
.setTargeting('queryStr', sdpTargeting.queryStr)
.setTargeting('domainName', sdpTargeting.domainName);
});
</script>
<!-- DFP SETUP end -->
<!-- END Drew's Adds -->
</head>
<body>
<div id="wrapper">
<div id="breadcrumb">
<span class="label">You are here:</span>
<a href="/">Home</a>
<span class="separator">></span>
<a href="/2009/team/index.html">2009 Teams</a>
<span class="separator">></span>
<span class="selected">Boston College</span>
</div> <!-- breadcrumb -->
<!-- New navbar, added by Drew -->
<nav class="navbar navbar-default navbar-static-top navbar-inverse navbar-thin" role="navigation" style="margin-bottom:0px;">
<div class="container-fluid">
<!-- Brand and toggle get grouped for better mobile display -->
<div class="navbar-header">
<img src="http://coachesbythenumbers.com/wp-content/custom-php/images/sportsourcenav.png" width="210" style="border:0;margin-top:13px"/>
</div>
<!-- Collect the nav links, forms, and other content for toggling -->
<div class="collapse navbar-collapse" id="bs-example-navbar-collapse-1">
<ul class="nav navbar-nav navbar-nav-thin navbar-right">
<li><a href="mailto:<EMAIL>">Advertise?</a></li>
<li><a href="mailto:<EMAIL>">Contact Us</a></li>
<li><a href="https://twitter.com/SportSourceA"><img src="http://coachesbythenumbers.com/wp-content/custom-php/images/twitter_icon_24.png" width="14" style="margin-bottom:5px"/> @SportSourceA</a></li>
</ul>
</div><!-- /.navbar-collapse -->
</div><!-- /.container-fluid -->
</nav>
<div id="globalHeader">
<a id="imagemap" href="/"></a>
<ul id="globalNav">
<li><a href="/">Home</a></li>
<li><a href="/2020/national/index.html">National</a></li>
<li><a href="/2020/conference/index.html">Conferences</a></li>
<li><a href="/2020/team/index.html">Teams</a></li>
<li><a href="/2020/player/index.html">Players</a></li>
</ul>
</div> <!-- globalHeader -->
<div id="content">
<h1 id="pageTitle">2009 Boston College Eagles
through 01/07/2010</h1>
<div id="seasons">
<ul>
<li><a href="/2020/team/67/index.html">2020</a></li>
<li><a href="/2019/team/67/index.html">2019</a></li>
<li><a href="/2018/team/67/index.html">2018</a></li>
<li><a href="/2017/team/67/index.html">2017</a></li>
<li><a href="/2016/team/67/index.html">2016</a></li>
<li><a href="/2015/team/67/index.html">2015</a></li>
<li><a href="/2014/team/67/index.html">2014</a></li>
<li><a href="/2013/team/67/index.html">2013</a></li>
<li><a href="/2012/team/67/index.html">2012</a></li>
<li><a href="/2011/team/67/index.html">2011</a></li>
<li><a href="/2010/team/67/index.html">2010</a></li>
<li class="selected">2009</li>
</ul>
</div>
<div class="team-statistics">
<table class="team-statistics">
<caption>Team Statistics</caption>
<tr>
<th scope="col" class="statistic-name"></th>
<th scope="col" class="team-stat">Boston College</th>
<th scope="col" class="opponent-stat">Opponents</th>
</tr>
<tr>
<td class="statistic-name">Scoring: Points/Game</td>
<td>24.8</td>
<td>19.8</td>
</tr>
<tr class="even-row">
<td class="statistic-name">Scoring: Games - Points</td>
<td>13 - 322</td>
<td>13 - 257</td>
</tr>
<tr>
<td class="statistic-name row-group">First Downs: Total</td>
<td class="row-group">214</td>
<td class="row-group">235</td>
</tr>
<tr class="even-row">
<td class="statistic-name">First Downs: Rushing - Passing - By Penalty</td>
<td>91 - 102 - 21</td>
<td>78 - 145 - 12</td>
</tr>
<tr>
<td class="statistic-name row-group">Rushing: Yards / Attempt</td>
<td class="row-group">3.78</td>
<td class="row-group">2.99</td>
</tr>
<tr class="even-row">
<td class="statistic-name">Rushing: Attempts - Yards - TD</td>
<td>473 - 1788 - 17</td>
<td>448 - 1340 - 11</td>
</tr>
<tr>
<td class="statistic-name row-group">Passing: Rating</td>
<td class="row-group">117.95</td>
<td class="row-group">118.70</td>
</tr>
<tr class="even-row">
<td class="statistic-name">Passing: Yards</td>
<td>2423</td>
<td>2919</td>
</tr>
<tr>
<td class="statistic-name">Passing: Attempts - Completions - Interceptions - TD</td>
<td>350 - 176 - 18 - 21</td>
<td>448 - 277 - 15 - 12</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Total Offense: Yards / Play</td>
<td class="row-group">5.12</td>
<td class="row-group">4.75</td>
</tr>
<tr>
<td class="statistic-name">Total Offense: Plays - Yards</td>
<td>823 - 4211</td>
<td>896 - 4259</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Punt Returns: Yards / Return</td>
<td class="row-group">12.21</td>
<td class="row-group">8.31</td>
</tr>
<tr>
<td class="statistic-name">Punt Returns: Returns - Yards - TD</td>
<td>14 - 171 - 1</td>
<td>29 - 241 - 1</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Kickoff Returns: Yards / Return</td>
<td class="row-group">21.18</td>
<td class="row-group">19.25</td>
</tr>
<tr>
<td class="statistic-name">Kickoff Returns: Returns - Yards - TD</td>
<td>44 - 932 - 0</td>
<td>48 - 924 - 0</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Punting: Yards / Punt</td>
<td class="row-group">41.03</td>
<td class="row-group">37.73</td>
</tr>
<tr>
<td class="statistic-name">Punting: Punts - Yards</td>
<td>80 - 3282</td>
<td>78 - 2943</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Interceptions: Returns - Yards - TD</td>
<td class="row-group">15 - 155 - 1</td>
<td class="row-group">18 - 323 - 3</td>
</tr>
<tr>
<td class="statistic-name">Fumbles: Number - Lost</td>
<td>17 - 8</td>
<td>13 - 8</td>
</tr>
<tr class="even-row">
<td class="statistic-name">Penalties: Number - Yards</td>
<td>64 - 561</td>
<td>80 - 724</td>
</tr>
<tr>
<td class="statistic-name">Time of Possession / Game</td>
<td>29:19.00</td>
<td>30:41.00</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">3rd Down Conversions: Conversion %</td>
<td class="row-group">30.23%</td>
<td class="row-group">34.01%</td>
</tr>
<tr>
<td class="statistic-name">3rd Down Conversions: Attempts - Conversions</td>
<td>172 - 52</td>
<td>197 - 67</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">4th Down Conversions: Conversion %</td>
<td class="row-group">63.64%</td>
<td class="row-group">40%</td>
</tr>
<tr>
<td class="statistic-name">4th Down Conversions: Attempts - Conversions</td>
<td>11 - 7</td>
<td>20 - 8</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Red Zone: Success %</td>
<td class="row-group">88.64%</td>
<td class="row-group">77.5%</td>
</tr>
<tr>
<td class="statistic-name">Red Zone: Attempts - Scores</td>
<td>44 - 39</td>
<td>40 - 31</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">Field Goals: Success %</td>
<td class="row-group">92.9%</td>
<td class="row-group">80%</td>
</tr>
<tr>
<td class="statistic-name">Field Goals: Attempts - Made</td>
<td>14 - 13</td>
<td>25 - 20</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">PAT Kicking: Success %</td>
<td class="row-group">97.5%</td>
<td class="row-group">100%</td>
</tr>
<tr>
<td class="statistic-name">PAT Kicking: Attempts - Made</td>
<td>40 - 39</td>
<td>27 - 27</td>
</tr>
<tr class="even-row">
<td class="statistic-name row-group">2-Point Conversions: Success %</td>
<td class="row-group">-</td>
<td class="row-group">100%</td>
</tr>
<tr>
<td class="statistic-name">2-Point Conversions: Attempts - Made</td>
<td>0 - 0</td>
<td>1 - 1</td>
</tr>
</table>
</div> <!-- team-statistics -->
<div class="team-schedule">
<table class="team-schedule">
<caption>Team Schedule</caption>
<tr>
<th scope="col" class="date">Date</th>
<th scope="col" class="opponent">Opponent</th>
<th scope="col" class="result">Result</th>
<th scope="col" class="game-time">Game Time</th>
<th scope="col" class="attendance">Attendance</th>
</tr>
<tr>
<td class="date">09/05/09</td>
<td class="opponent">Northeastern</td>
<td class="result">W 54-0</td>
<td>2:48</td>
<td>33,262</td>
</tr>
<tr class="even-row">
<td class="date">09/12/09</td>
<td class="opponent"><a href="/2009/team/331/index.html">Kent St.</a></td>
<td class="result">W 34-7</td>
<td>2:46</td>
<td>25,165</td>
</tr>
<tr>
<td class="date">09/19/09</td>
<td class="opponent">@ 24 <a href="/2009/team/147/index.html">Clemson</a></td>
<td class="result">L 7-25</td>
<td>4:46</td>
<td>77,000</td>
</tr>
<tr class="even-row">
<td class="date">09/26/09</td>
<td class="opponent"><a href="/2009/team/749/index.html">Wake Forest</a></td>
<td class="result">W 27-24</td>
<td>3:06</td>
<td>40,892</td>
</tr>
<tr>
<td class="date">10/03/09</td>
<td class="opponent"><a href="/2009/team/234/index.html">Florida St.</a></td>
<td class="result">W 28-21</td>
<td>3:24</td>
<td>40,029</td>
</tr>
<tr class="even-row">
<td class="date">10/10/09</td>
<td class="opponent">@ 10 <a href="/2009/team/742/index.html">Virginia Tech</a></td>
<td class="result">L 14-48</td>
<td>3:02</td>
<td>66,233</td>
</tr>
<tr>
<td class="date">10/17/09</td>
<td class="opponent"><a href="/2009/team/490/index.html">North Carolina St.</a></td>
<td class="result">W 52-20</td>
<td>3:23</td>
<td>35,261</td>
</tr>
<tr class="even-row">
<td class="date">10/24/09</td>
<td class="opponent">@ <a href="/2009/team/513/index.html"><NAME></a></td>
<td class="result">L 16-20</td>
<td>3:30</td>
<td>80,795</td>
</tr>
<tr>
<td class="date">10/31/09</td>
<td class="opponent">23 <a href="/2009/team/129/index.html">Central Mich.</a></td>
<td class="result">W 31-10</td>
<td>2:57</td>
<td>34,128</td>
</tr>
<tr class="even-row">
<td class="date">11/14/09</td>
<td class="opponent">@ <a href="/2009/team/746/index.html">Virginia</a></td>
<td class="result">W 14-10</td>
<td>3:09</td>
<td>44,324</td>
</tr>
<tr>
<td class="date">11/21/09</td>
<td class="opponent"><a href="/2009/team/457/index.html">North Carolina</a></td>
<td class="result">L 13-31</td>
<td>3:10</td>
<td>41,272</td>
</tr>
<tr class="even-row">
<td class="date">11/28/09</td>
<td class="opponent">@ <a href="/2009/team/392/index.html">Maryland</a></td>
<td class="result">W 19-17</td>
<td>2:59</td>
<td>35,042</td>
</tr>
<tr>
<td class="date">12/26/09</td>
<td class="opponent">+ 22 <a href="/2009/team/657/index.html">Southern California</a></td>
<td class="result">L 13-24</td>
<td>3:18</td>
<td>40,121</td>
</tr>
<tfoot>
<tr>
<td class="legend" colspan="5">@ : Away, + : Neutral Site</td>
</tr>
</tfoot>
</table>
</div> <!-- team-schedule -->
<div class="team-record">
<table class="team-record">
<caption>Team Record</caption>
<tr>
<th scope="col" class="split-name">Split</th>
<th scope="col" class="w-l-record">Record</th>
</tr>
<tr>
<td class="split-name">All Games</td>
<td>8-5</td>
</tr>
<tr class="even-row">
<td class="split-name">at Home</td>
<td>6-1</td>
</tr>
<tr>
<td class="split-name">on Road/Neutral Site</td>
<td>2-4</td>
</tr>
<tr class="even-row">
<td class="split-name">vs. Conference</td>
<td>5-3</td>
</tr>
<tr>
<td class="split-name">vs. Non-Conference</td>
<td>3-2</td>
</tr>
<tr class="even-row">
<td class="split-name">vs. Ranked (AP)</td>
<td>1-3</td>
</tr>
<tr>
<td class="split-name">vs. Unranked (AP)</td>
<td>7-2</td>
</tr>
<tr class="even-row">
<td class="split-name">vs. FBS (I-A)</td>
<td>7-5</td>
</tr>
<tr>
<td class="split-name">vs. FCS (I-AA)</td>
<td>1-0</td>
</tr>
<tr class="even-row">
<td class="split-name">vs. FBS Winning</td>
<td>2-4</td>
</tr>
<tr>
<td class="split-name">vs. FBS Non-Winning</td>
<td>5-1</td>
</tr>
<tr class="even-row">
<td class="split-name">vs. BCS AQ</td>
<td>5-5</td>
</tr>
<tr>
<td class="split-name">vs. BCS non-AQ</td>
<td>2-0</td>
</tr>
<tr class="even-row">
<td class="split-name">vs. FBS Power 5</td>
<td>5-4</td>
</tr>
<tr>
<td class="split-name">vs. FBS non-Power 5</td>
<td>2-1</td>
</tr>
<tr class="even-row">
<td class="split-name">in August/September</td>
<td>3-1</td>
</tr>
<tr>
<td class="split-name">in October</td>
<td>3-2</td>
</tr>
<tr class="even-row">
<td class="split-name">in November</td>
<td>2-1</td>
</tr>
<tr>
<td class="split-name">in December/January</td>
<td>0-1</td>
</tr>
</table>
</div> <!-- team-record -->
<div id="footer">
<p>Copyright © 2006-2020 www.cfbstats.com All rights reserved.</p>
<p><a href="/?page_id=4">Terms of Use</a></p>
<p id="timestamp">12/13/2020 20:58:21</p>
<!-- SMG_CFBStats/300x250_1a/sports/general -->
<div id="usmg_ad__general_sports_300x250_1a">
<script type='text/javascript'>
googletag.defineSlot('/7103/SMG_CFBStats/300x250_1a/sports/general', [[300,250],[300,600]], 'usmg_ad__general_sports_300x250_1a').addService(googletag.pubads());
googletag.enableServices();
googletag.display('usmg_ad__general_sports_300x250_1a');
</script>
</div>
</div> <!-- footer -->
</div> <!-- content -->
<div id="leftColumn">
<div class="section">
<ul>
<li class="selected">Team Home</li>
<li><a href="/2009/team/67/roster.html">Roster</a></li>
<li><a href="/2009/team/67/rushing/index.html">Rushing</a></li>
<li><a href="/2009/team/67/passing/index.html">Passing</a></li>
<li><a href="/2009/team/67/receiving/index.html">Receiving</a></li>
<li><a href="/2009/team/67/puntreturn/index.html">Punt Returns</a></li>
<li><a href="/2009/team/67/kickreturn/index.html">Kickoff Returns</a></li>
<li><a href="/2009/team/67/punting/index.html">Punting</a></li>
<li><a href="/2009/team/67/kickoff/index.html">Kickoffs</a></li>
<li><a href="/2009/team/67/kicking/index.html">Place Kicking</a></li>
<li><a href="/2009/team/67/scoring/index.html">Scoring</a></li>
<li><a href="/2009/team/67/total/index.html">Total Offense</a></li>
<li><a href="/2009/team/67/allpurpose/index.html">All-Purpose Running</a></li>
<li><a href="/2009/team/67/interception/index.html">Interceptions</a></li>
<li><a href="/2009/team/67/fumblereturn/index.html">Fumble Returns</a></li>
<li><a href="/2009/team/67/tackle/index.html">Tackles</a></li>
<li><a href="/2009/team/67/tackleforloss/index.html">Tackles For Loss</a></li>
<li><a href="/2009/team/67/sack/index.html">Sacks</a></li>
<li><a href="/2009/team/67/miscdefense/index.html">Misc. Defense</a></li>
<li><a href="/2009/team/67/firstdown/offense/split.html">First Downs</a></li>
<li><a href="/2009/team/67/penalty/offense/split.html">Penalties</a></li>
<li><a href="/2009/team/67/thirddown/offense/split.html">3rd Down Conversions</a></li>
<li><a href="/2009/team/67/fourthdown/offense/split.html">4th Down Conversions</a></li>
<li><a href="/2009/team/67/redzone/offense/split.html">Red Zone Conversions</a></li>
<li><a href="/2009/team/67/turnovermargin/split.html">Turnover Margin</a></li>
</ul>
</div> <!-- section -->
</div> <!-- leftColumn -->
<div id="wrapperClear"/>
</div> <!-- wrapper -->
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-10344841-1");
pageTracker._trackPageview();
} catch(err) {}</script>
<!-- Start of ADS -->
<div id="usmg_ad__main_sports_skin">
<script type='text/javascript'>
//googletag.defineSlot('/7103/SMG_CFBStats/skin/sports/main', [1,1], 'usmg_ad__main_sports_skin').addService(googletag.pubads());
//googletag.enableServices();
//googletag.display('usmg_ad__main_sports_skin');
</script>
</div>
<!-- SMG_CFBStats/skin/sports/general -->
<div id="usmg_ad__general_sports_skin">
<script type='text/javascript'>
googletag.defineSlot('/7103/SMG_CFBStats/skin/sports/general', [1,1], 'usmg_ad__general_sports_skin').addService(googletag.pubads());
googletag.enableServices();
googletag.display('usmg_ad__general_sports_skin');
</script>
</div>
<br/><br/><br/><br/>
<!-- End of ADS -->
</body>
</html>"""
##############################################################################################################
# Scrape stats from individual team pages
##############################################################################################################
team_id = 0
team_name = "<NAME>"
team_stat_titles = [
"Points/Game",
"Games",
"Total Points",
"First Downs",
"Rush First Downs",
"Pass First Downs",
"First Downs by Penalty",
"Rush Yards/Att",
"Rush Att",
"Rush Yards",
"Rush TDs",
"Pass Rating",
"Pass Yards",
"Pass Att",
"Pass Comp",
"INTs",
"Pass TDs",
"Total Off Yards/Play",
"Total Off Plays",
"Total Off Yards",
"Punt Yards/Return",
"Punt Returns",
"Punt Return Yards",
"Punt Return TDs",
"KO Yards/Return",
"KO Returns",
"KO Return Yards",
"KO Return TDs",
"Yards/Punt",
"Punts",
"Punt Yards",
"INT returns",
"INT Yards",
"INT TDs",
"Fumbles",
"Fumbles Lost",
"Penalties",
"Penalty Yards",
"TOP/Game",
"3rd Down Conversion %",
"3rd Down Conversion Att",
"3rd Down Conversions",
"4th Down Conversion %",
"4th Down Conversion Att",
"4th Down Conversions",
"RZ Success %",
"RZ Att",
"RZ Scores",
"FG Success %",
"FG Att",
"FGs Made",
"PAT Success %",
"PAT Att",
"PATs Made",
"2-Point Success %",
"2-Point Att",
"2-Point Conversions",
]
opp_stat_titles = ["Opp " + title for title in team_stat_titles]
stat_titles = team_stat_titles + opp_stat_titles
all_stats = []
team_stats = []
opp_stats = []
# These are populated with the stat names and actual numbers
team_stats_dict = {}
opp_stats_dict = {}
# This will allow you to loop through each team's page and gather stats from each.
# for team_url in team_urls:
# team_page_soup = Scraper.soup_recipe(team_url)
# time.sleep(0.25)
# Delete below line of code when using GET requests
team_page_soup = BeautifulSoup(test_team_page, "html.parser")
# finds the statistics table
stats_table = team_page_soup.find("table", class_="team-statistics")
# find all rows
table_rows = stats_table.find_all("tr")
# The first row is useless for this program and is removed below
table_rows.pop(0)
# splits and strips each cell, then appends the values to the given list. .strip() removes whitespace at the beginning and end of each string. Converting the strings to float would be nice, but time of possession won't convert to a number.
def process_cell(cell, l):
cell_text = cell.text
if cell_text == "-":
cell_text = "0"
if "2-Point Conversions:" in cell_text:
splt_char = "-"
cell_temp = cell_text.split(splt_char)
cell_split = splt_char.join(cell_temp[:2]), splt_char.join(cell_temp[2:])
else:
cell_split = cell_text.split("-")
for c in cell_split:
if c != "":
l.append(c.strip())
# loops through each row, separating team stats and opponent stats into different lists.
for tr in table_rows:
# Find each of the three cells isolated in separate variables
cell1 = tr.find_next()
cell2 = cell1.find_next()
cell3 = cell2.find_next()
# process each cell and append results to separate lists
process_cell(cell2, team_stats)
process_cell(cell3, opp_stats)
team_stats.extend(opp_stats)
# Inserts the team name as the first list item
stat_titles.insert(0, "Team")
team_stats.insert(0, team_name)
all_stats.append(team_stats)
stats_df = | pd.DataFrame(all_stats, columns=stat_titles) | pandas.DataFrame |
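# --- Illustrative aside (not part of the original scraper) ---
# process_cell() splits "a - b - c" cells into separate values (with a special
# case for the 2-Point Conversions row), and the final stats land in a
# DataFrame built from a list of row-lists. A small hedged sketch of that
# pattern; the team name and numbers below are made up:
import pandas as pd
def split_stat_cell(text):
    # mirrors the idea of process_cell: split on "-" and strip whitespace
    return [part.strip() for part in text.split('-') if part.strip() != '']
demo_row = ['Demo Team'] + split_stat_cell('13 - 322') + split_stat_cell('91 - 102 - 21')
demo_df = pd.DataFrame([demo_row], columns=['Team', 'Games', 'Points',
                                            'Rush FD', 'Pass FD', 'FD by Penalty'])
# demo_df.to_csv('demo_stats.csv', index=False)  # a typical next step when compiling stats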
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#"""
#Copyright [2020] [Indian Institute of Science, Bangalore & Tata Institute of Fundamental Research, Mumbai]
#SPDX-License-Identifier: Apache-2.0
#"""
__name__ = "Script for generating city files - instantiation of a synthetic city"
import os
import sys
import math
import argparse
import csv
import random
import json
import warnings
warnings.filterwarnings('ignore')
import geopandas as gpd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Point, MultiPolygon
from computeDistributions import *
# Default Global Parameters
interactive = 0
default_miniPop = 100000
default_city="bangalore"
default_ibasepath = 'data/base/bangalore/'
default_obasepath = 'data/bangalore-100K/'
a_workplacesize = 3.26
c_workplacesize = 0.97
m_max_workplacesize = 2870
avgSchoolsize = 300
# Handling inputs and interactions
if interactive:
city = default_city
miniPop = default_miniPop
ibasepath = default_ibasepath
obasepath = default_obasepath
else:
my_parser = argparse.ArgumentParser(description='Create mini-city for COVID-19 simulation')
my_parser.add_argument('-c', help='target city', default=default_city)
my_parser.add_argument('-n', help='target population', default=default_miniPop)
my_parser.add_argument('-i', help='input folder', default=default_ibasepath)
my_parser.add_argument('-o', help='output folder', default=default_obasepath)
args = my_parser.parse_args()
city = (args.c).lower()
miniPop = int(args.n)
ibasepath = args.i
obasepath = args.o
if ibasepath[-1]!='/':
ibasepath = ibasepath+'/'
if obasepath[-1]!='/':
obasepath = obasepath+'/'
# Workplace commute parameters
if city == 'bangalore':
a_commuter_distance = 10.751
b_commuter_distance = 5.384
m_max_commuter_distance = 35
if city == 'mumbai':
a_commuter_distance = 4 #parameter in distribution for commuter distance - Thailand paper
b_commuter_distance = 3.8 #parameter in distribution for commuter distance - Thailand paper
m_max_commuter_distance = 60
# Create output directory if not present
if not os.path.exists(obasepath):
os.mkdir(obasepath)
# Prepare input file paths
citygeojsonfile = ibasepath+"city.geojson"
demographicsfile = ibasepath+"demographics.csv"
employmentfile = ibasepath+"employment.csv"
householdfile = ibasepath+"households.csv"
cityprofilefile = ibasepath+"cityProfile.json"
slumfracfile = ibasepath+"slumFraction.csv"
slumclusterfile = ibasepath+"slumClusters.geojson"
ODMatrixfile = ibasepath+"ODMatrix.csv"
individualsjson = obasepath+"individuals.json"
housesjson = obasepath+"houses.json"
workplacesjson = obasepath+"workplaces.json"
schoolsjson = obasepath+"schools.json"
wardCentreDistancejson = obasepath+"wardCentreDistance.json"
commonAreajson = obasepath+"commonArea.json"
fractionPopulationjson = obasepath+"fractionPopulation.json"
#fixing for now
slum_schoolsize_factor = 2
slum_householdsize_scalefactor = 2
print("Creating city with a population of approximately ",miniPop,flush=True)
print("")
print("Reading city.geojson to get ward polygons...",end='',flush=True)
geoDF = gpd.read_file(citygeojsonfile)
geoDF['wardNo'] = geoDF['wardNo'].astype(int)
geoDF['wardIndex'] = geoDF['wardNo'] - 1
geoDF = geoDF[['wardIndex','wardNo', 'wardName', 'geometry']]
geoDF['wardBounds'] = geoDF.apply(lambda row: MultiPolygon(row['geometry']).bounds, axis=1)
geoDF['wardCentre'] = geoDF.apply(lambda row: (MultiPolygon(row['geometry']).centroid.x, MultiPolygon(row['geometry']).centroid.y), axis=1)
geoDF["neighbors"] = geoDF.apply(lambda row: ", ".join([str(ward) for ward in geoDF[~geoDF.geometry.disjoint(row['geometry'])]['wardNo'].tolist()]) , axis=1)
print("done.",flush=True)
if os.path.exists(slumfracfile):
print(slumfracfile,"exists... processing slum data",flush=True)
slum_flag = 1
slum_fractions = []
with open(slumfracfile, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if row[0]=='wardIndex':
continue
slum_fractions.append(float(row[2]))
if os.path.exists(slumclusterfile):
slumcluster_flag=1
print("Slum clustter file found. Parsing slum clusters...",end='',flush=True)
geoDFslums = gpd.read_file(slumclusterfile)
wardslums = [[] for _ in range(len(geoDF))]
for i in range(len(geoDFslums)):
for j in range(len(geoDF)):
if geoDFslums["geometry"][i].intersects(geoDF["geometry"][j]):
wardslums[j].append(i)
print("done.",flush=True)
else:
slumcluster_flag=0
print("Slum clustter file not found.",end='',flush=True)
else:
slum_flag=0
slumcluster_flag=0
print(slumfracfile,"does not exist... not processing slum data",flush=True)
print("Reading demographics, employment and household data (csv)...",end='',flush=True)
wardname = []
wardpop = []
wardarea = []
wardemployed = []
wardunemployed = []
wardworkforce = []
wardhouseholds = []
demographics = | pd.read_csv(demographicsfile) | pandas.read_csv |
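# --- Illustrative aside (not part of the original script) ---
# The ward pre-processing above derives bounds, centroids and neighbour lists
# from the ward polygons with geopandas/shapely. A minimal sketch of the same
# centroid/neighbour idea on two toy square polygons (coordinates invented):
import geopandas as gpd
from shapely.geometry import Polygon
toy = gpd.GeoDataFrame({'wardNo': [1, 2]},
                       geometry=[Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
                                 Polygon([(1, 0), (2, 0), (2, 1), (1, 1)])])
toy['centre'] = toy.geometry.apply(lambda g: (g.centroid.x, g.centroid.y))
toy['neighbors'] = toy.geometry.apply(
    lambda g: toy.loc[~toy.geometry.disjoint(g), 'wardNo'].tolist())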
import numpy as np
def interp1d_(x, y, x_new):
from scipy.interpolate import interp1d, pchip_interpolate
# return interp1d(x,y,kind='cubic')(x_new)
return pchip_interpolate(x, y, x_new)
def get_baseline_dff(fmean, fneuropil, cont_ratio=0.7, win_=3000, q=0.1):
import pandas as pd
fmean_comp = fmean-fneuropil*cont_ratio
if fmean_comp.min()<0:
fmean_comp = fmean_comp-fmean_comp.min()+100
baseline = | pd.Series(fmean_comp) | pandas.Series |
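# --- Illustrative aside (not part of the original row) ---
# The row above is cut off right after building the pandas Series; the win_ and
# q arguments suggest a rolling-quantile baseline. A hedged sketch of how such
# a baseline and dF/F *might* be computed -- this is an assumption for
# illustration, not the original implementation:
import numpy as np
import pandas as pd
def baseline_dff_sketch(fmean_comp, win_=3000, q=0.1):
    trace = pd.Series(fmean_comp)
    baseline = trace.rolling(win_, min_periods=1, center=True).quantile(q)
    dff = (trace - baseline) / baseline
    return baseline.to_numpy(), dff.to_numpy()
# baseline_dff_sketch(np.random.rand(10000) + 100)  # example call on synthetic data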
import getpass
import math
import pickle
from kivy.clock import Clock
from kivy.uix.textinput import TextInput
from kivymd.app import MDApp
from kivymd.uix.datatables import MDDataTable
from kivy.lang.builder import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.metrics import dp
import os
from datetime import datetime, date
from kivy.utils import get_color_from_hex
import pandas as pd
import win32clipboard
from fpdf import FPDF
from kivymd.uix.dialog import MDDialog
from kivymd.uix.selectioncontrol import MDCheckbox
from kivymd.uix.textfield import MDTextFieldRect
import glob
from reportlab.pdfgen import canvas
from PyPDF2 import PdfFileWriter, PdfFileReader
import win32com.client as win32
class TelaLogin(Screen):
pass
class AnalisesPendentes(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dialog = None
self.arquivos_assinatura = []
self.arquivos_pdf = []
self.tabela_pendentes = None
# Create the watermark containing the user's signature
c = canvas.Canvas('watermark.pdf')
# Draw the signature image at position x, y.
c.drawImage(getpass.getuser() + '.png', 440, 30, 100, 60, mask='auto')
c.save()
# Load the watermark file that was just created
self.watermark = PdfFileReader(open(os.path.join('watermark.pdf'), 'rb'))
# Open the file containing the working folder and the users' e-mail addresses
with open('dados.txt', 'r', encoding='UTF-8') as bd:
self.dados = bd.readlines()
self.diretorio = self.dados[0].rstrip('\n')
def add_datatable(self): # Add the table with the pending analyses
self.arquivos_assinatura.clear()
self.arquivos_pdf.clear()
self.arquivos_diretorio = os.listdir(self.diretorio)
for item in self.arquivos_diretorio: # Select the PDF files to show in the table
if item.endswith('.pdf') is True and item != 'watermark.pdf':
dt_modificacao = os.path.getctime(self.diretorio)
dt_modificacao = datetime.fromtimestamp(dt_modificacao)
data = date.strftime(dt_modificacao, '%d/%m/%Y')
self.arquivos_pdf.append((item, data))
if len(self.arquivos_pdf) == 1:
self.arquivos_pdf.append(('', ''))
self.tabela_pendentes = MDDataTable(pos_hint={'center_x': 0.5, 'center_y': 0.5},
size_hint=(0.4, 0.55),
check=True, use_pagination=True,
background_color_header=get_color_from_hex("#0d7028"),
column_data=[("[color=#ffffff]Análise[/color]", dp(90)),
("[color=#ffffff]Data[/color]", dp(30))],
row_data=self.arquivos_pdf, elevation=1)
self.add_widget(self.tabela_pendentes)
self.tabela_pendentes.bind(on_row_press=self.abrir_pdf)
self.tabela_pendentes.bind(on_check_press=self.marcar_pdf)
def marcar_pdf(self, instance_row, current_row): # Mark files to be signed
self.arquivos_assinatura.append(current_row[0])
print(current_row)
def abrir_pdf(self, instance_table, current_row): # Open the PDF
if self.tabela_pendentes.get_row_checks():
pass
else:
try:
os.startfile(os.path.join(self.diretorio, current_row.text))
except FileNotFoundError:
self.dialog = MDDialog(text="Clique sobre o texto Análise Tributária...!", radius=[20, 7, 20, 7], )
self.dialog.open()
def assinatura(self): # Sign the selected files
self.salvos = []
for n, arquivo in enumerate(self.arquivos_assinatura):
os.chdir(self.diretorio)
self.output_file = PdfFileWriter()
with open(arquivo, 'rb') as f:
input_file = PdfFileReader(f)
# Number of pages in the document
page_count = input_file.getNumPages()
# Walk through the file, adding the watermark to the last page
for page_number in range(page_count):
input_page = input_file.getPage(page_number)
if page_number == page_count - 1:
input_page.mergePage(self.watermark.getPage(0))
self.output_file.addPage(input_page)
self.dir_acima = self.diretorio.split('\\')
self.dir_acima.insert(1, '\\')
self.dir_acima = os.path.join(*self.dir_acima[:-1])
os.chdir(self.dir_acima)
file = glob.glob(str(arquivo[21:32]) + '*')
pasta_analise = ''.join(file)
try:
os.chdir(pasta_analise)
except OSError:
os.chdir(self.dir_acima)
# Generate the new signed PDF file
with open('Análise Tributária - ' + str(arquivo[21:]), "wb") as outputStream:
self.output_file.write(outputStream)
os.chdir(self.diretorio)
os.remove(arquivo)
self.salvos.append(n)
troca = 0
for i in self.salvos:
self.arquivos_pdf.pop(i - troca)
troca += 1
outlook = win32.Dispatch('outlook.application')
# create an e-mail
email = outlook.CreateItem(0)
# configure the e-mail details and pick the address from the text file
email.To = self.dados[2]
email.Subject = "E-mail automático Análise Tributária"
email.HTMLBody = f"""
<p>Análise(s) Tributária(s) assinada(s) com sucesso.</p>
"""
email.Send()
self.dialog = MDDialog(text='Análise(s) assinada(s) com sucesso!', radius=[20, 7, 20, 7], )
self.dialog.open()
self.add_datatable() # Refresh the list of analyses
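# --- Illustrative aside (not part of the original app) ---
# assinatura() above stamps the signature watermark onto the last page of each
# PDF with the legacy PyPDF2 1.x API (PdfFileReader/PdfFileWriter/mergePage).
# A minimal standalone sketch of that stamping step; the file names are
# hypothetical:
from PyPDF2 import PdfFileReader, PdfFileWriter
def sign_last_page(src_pdf, watermark_pdf, dst_pdf):
    watermark = PdfFileReader(open(watermark_pdf, 'rb')).getPage(0)
    writer = PdfFileWriter()
    with open(src_pdf, 'rb') as f:
        reader = PdfFileReader(f)
        last = reader.getNumPages() - 1
        for i in range(reader.getNumPages()):
            page = reader.getPage(i)
            if i == last:
                page.mergePage(watermark)   # overlay the signature image
            writer.addPage(page)
        with open(dst_pdf, 'wb') as out:
            writer.write(out)
# sign_last_page('analysis.pdf', 'watermark.pdf', 'analysis_signed.pdf')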
class CarregarAnalise(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.lista_analises = [] # Holds data from the text file with the saved analyses
self.temp_list = [] # Holds the raw data loaded from the pickle file
self.dados_tabela = None # Table widget used for display
def carregar_dados(self):
with open(os.path.join(self.manager.get_screen('pendentes').diretorio, 'Base.txt'), "rb") as carga:
while True:
try:
self.temp_list.append(pickle.load(carga))
except EOFError:
break
for n, item in enumerate(self.temp_list):
itens = (item[2], item[0])
self.lista_analises.append(itens)
self.lista_analises.sort(key=lambda lista: datetime.strptime(lista[1], '%d/%m/%Y, %H:%M:%S'), reverse=True)
if len(self.lista_analises) == 1:
self.lista_analises.append(('', ''))
self.dados_tabela = MDDataTable(pos_hint={'center_x': 0.5, 'center_y': 0.5},
size_hint=(0.4, 0.75), rows_num=10,
use_pagination=True,
background_color_header=get_color_from_hex("#0d7028"),
check=True,
column_data=[("[color=#ffffff]Análise[/color]", dp(70)),
("[color=#ffffff]Data[/color]", dp(70))],
row_data=self.lista_analises, elevation=1)
self.add_widget(self.dados_tabela)
self.dados_tabela.bind(on_row_press=self.abrir_dados)
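# --- Illustrative aside (not part of the original app) ---
# carregar_dados() above reads every record that was pickle.dump()-ed into a
# single file by looping until EOFError. A tiny self-contained sketch of that
# append/read pattern; the path name is hypothetical:
import pickle
def append_record(path, record):
    with open(path, 'ab') as f:           # append mode keeps earlier records
        pickle.dump(record, f)
def load_all_records(path):
    records = []
    with open(path, 'rb') as f:
        while True:
            try:
                records.append(pickle.load(f))
            except EOFError:
                break
    return records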
def abrir_dados(self, instance_table, current_row): # Fetch the saved data and send it to the input fields
verinfo3 = int(current_row.index / 2)
self.temp_list.sort(key=lambda lista: datetime.strptime(lista[0], '%d/%m/%Y, %H:%M:%S'), reverse=True)
self.manager.get_screen("nova").ids.gere.text = self.temp_list[int(verinfo3)][1]
self.manager.get_screen("nova").ids.proc.text = self.temp_list[int(verinfo3)][2]
self.manager.get_screen("nova").ids.req.text = self.temp_list[int(verinfo3)][3]
self.manager.get_screen("nova").ids.orcam_sim.state = 'down' if self.temp_list[int(verinfo3)][
4] == 'down' else 'normal'
self.manager.get_screen("nova").ids.orcam_nao.state = 'normal' if self.temp_list[int(verinfo3)][
4] == 'down' else 'down'
self.manager.get_screen("nova").ids.objcust.text = self.temp_list[int(verinfo3)][5]
self.manager.get_screen("nova").ids.check1.active = self.temp_list[int(verinfo3)][6]
self.manager.get_screen("nova").ids.check2.active = self.temp_list[int(verinfo3)][7]
self.manager.get_screen("nova").ids.check3.active = self.temp_list[int(verinfo3)][8]
self.manager.get_screen("nova").ids.objeto.text = self.temp_list[int(verinfo3)][9].strip()
self.manager.get_screen("nova").ids.valor.text = self.temp_list[int(verinfo3)][10]
self.manager.get_screen("nova").ids.complem.text = self.temp_list[int(verinfo3)][11].strip()
for r, val in enumerate(self.temp_list[int(verinfo3)][12]):
for b, value in enumerate(val):
self.manager.get_screen("nova").lista_mat[b][r].text = value
self.manager.get_screen("nova").ids.linha_mat.text = self.temp_list[int(verinfo3)][13]
self.manager.get_screen("nova").ids.serv.text = self.temp_list[int(verinfo3)][14].strip()
self.manager.get_screen("nova").ids.iva.text = self.temp_list[int(verinfo3)][15]
for r, val in enumerate(self.temp_list[int(verinfo3)][16]):
for b, value in enumerate(val):
self.manager.get_screen("nova").lista_serv[b][r].text = value
self.manager.get_screen("nova").ids.linha_serv.text = self.temp_list[int(verinfo3)][17]
self.manager.get_screen("nova").ids.obs.text = self.temp_list[int(verinfo3)][18].strip().strip()
self.manager.get_screen("nova").ids.obs_serv.text = self.temp_list[int(verinfo3)][19].strip()
self.manager.get_screen("nova").ids.obs1.text = self.temp_list[int(verinfo3)][20].strip()
self.manager.get_screen("nova").ids.obs2.text = self.temp_list[int(verinfo3)][21].strip()
for n, i in enumerate(self.temp_list[int(verinfo3)][22]):
self.manager.get_screen("nova").infos[n].text = i.strip()
for n, i in enumerate(self.temp_list[int(verinfo3)][23]):
if i == 'down':
self.manager.get_screen("nova").lista_check[n].state = 'down'
self.manager.get_screen("nova").ids.linha_cont.text = self.temp_list[int(verinfo3)][24]
self.manager.get_screen("nova").ids.linha_obs.text = self.temp_list[int(verinfo3)][25]
self.manager.current = 'nova'
class NovaAnalise(Screen):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.lista_mat = [[], [], [], [], [], [], [], []] # List handling the 8 column positions for materials
self.entradas_mat = [] # Holds the input widgets created for the material data
self.lista_serv = [[], [], []] # List handling the 3 column positions for services
self.entradas = [] # Holds the input widgets created for the service data
self.posicao = [] # Identifies which table row the material/service data will go into
self.lista_check = [] # Stores the state of the clause check buttons
self.infos = [] # Loads the contract clause information
self.data_mat = [] # List used to put the material data into the PDF
self.data = [] # List used to put the service data into the PDF
Clock.schedule_once(self.cria_tabela_materiais)
Clock.schedule_once(self.cria_tabela_servicos)
Clock.schedule_once(self.clausulas)
Clock.schedule_once(self.informacoes_padrao)
def informacoes_padrao(self, dt):
self.ids.obs.text = 'Produto para Consumo Final. \nFabricante: Alíquota de ICMS de 18% conforme RICMS-SP/2000,'\
' Livro I, Título III, Capítulo II, Seção II, Artigo 52, Inciso I \nRevendedor: ' \
'Informar o ICMS-ST recolhido anteriormente\n\nEntrega de materiais em local diverso do ' \
'destinatário: o endereço deverá constar na nota fiscal em campo específico do xml ' \
'(bloco G) e em dados adicionais. (Regime Especial 28558/2018).'
self.ids.obs1.text = 'Obs 1: Caso o fornecedor possua alguma especificidade que implique tratamento tributário'\
' diverso do exposto acima, ou seja do regime tributário "SIMPLES NACIONAL" deverá' \
' apresentar documentação hábil que comprove sua condição peculiar, a qual será alvo de' \
' análise prévia pela GECOT.'
self.ids.obs2.text = 'Obs 2: Essa Análise não é exaustiva, podendo sofrer alterações no decorrer do processo ' \
'de contratação em relação ao produto/serviço.'
def cria_tabela_materiais(self, dt):
for i in range(61):
for c in range(8):
if c == 1:
largura = .2
elif c == 0:
largura = .1
elif c == 3:
largura = .1
else:
largura = .05
self.mater = MDTextFieldRect(multiline=False, size_hint=(largura, .05), write_tab=False)
self.entradas_mat.append(self.mater)
self.lista_mat[c].append(self.mater)
self.manager.get_screen("nova").ids.grid_teste.add_widget(self.mater)
for i, n in enumerate(self.entradas_mat):
if i % 8 == 0:
self.entradas_mat[i].bind(on_text_validate=self.busca_dados_mat_clipboard)
self.entradas_mat[i + 1].bind(focus=self.busca_dados_mat)
def busca_dados_mat(self, instance, widget):
cad_mat = pd.read_excel(os.path.join(self.manager.get_screen("pendentes").diretorio, 'cadastro.xlsx'),
sheet_name='materiais', converters={'Material': str, 'IPI': str})
cad_mat = pd.DataFrame(cad_mat)
for i, l in enumerate(self.entradas_mat):
if i % 8 == 0:
if l.text != '' and self.entradas_mat[i + 1].text == '':
for index, row in cad_mat.iterrows():
if l.text == row['Material']:
campo = cad_mat.loc[index, 'Texto breve material']
campo = campo[:32]
self.entradas_mat[i + 1].text = campo
self.entradas_mat[i + 3].text = cad_mat.loc[index, 'Ncm']
self.entradas_mat[i + 5].text = cad_mat.loc[index, 'IPI']
break
def busca_dados_mat_clipboard(self, instance):
for i, l in enumerate(self.entradas_mat):
if i % 8 == 0:
if l.text != '' and self.entradas_mat[i + 1].text == '':
self.posicao = int(i / 8) if i > 0 else i
break
cad_mat = pd.read_excel(os.path.join(self.manager.get_screen("pendentes").diretorio, 'cadastro.xlsx'),
sheet_name='materiais', converters={'Material': str, 'IPI': str})
cad_mat['Material'] = cad_mat['Material'].astype(str)
win32clipboard.OpenClipboard()
rows = win32clipboard.GetClipboardData()
win32clipboard.EmptyClipboard()
win32clipboard.CloseClipboard()
rows = rows.split('\n')
        if len(rows) > 1:
            rows.pop()  # drop the trailing empty entry produced by the final newline
for r, val in enumerate(rows):
values = val.split('\t')
if len(values) > 1:
del values[1:]
for b, value in enumerate(values):
for index, row in cad_mat.iterrows():
self.lista_mat[b][r + self.posicao].text = value
if value.strip() == row['Material']:
campo = cad_mat.loc[index, 'Texto breve material']
campo = campo[:32]
self.lista_mat[b + 1][r + self.posicao].text = campo
self.lista_mat[b + 3][r + self.posicao].text = cad_mat.loc[index, 'Ncm']
self.lista_mat[b + 5][r + self.posicao].text = cad_mat.loc[index, 'IPI']
def preenche_iva(self):
for e, item in enumerate(self.entradas_mat):
if e % 8 == 0 and e != 0:
if item.text != '':
self.entradas_mat[e + 2].text = self.entradas_mat[2].text
self.ids.check_iva.active = False
def preenche_ncm(self):
for e, item in enumerate(self.entradas_mat):
if e % 8 == 0 and e != 0:
if item.text != '':
self.entradas_mat[e + 3].text = self.entradas_mat[3].text
def preenche_aliq(self):
for e, item in enumerate(self.entradas_mat):
if e % 8 == 0:
if item.text != '':
self.entradas_mat[e + 4].text = '18%'
self.entradas_mat[e + 6].text = '1,65%'
self.entradas_mat[e + 7].text = '7,6%'
def limpa_dados_mat(self):
for lin in self.entradas_mat:
lin.text = ''
def limpa_dados_serv(self):
for lin in self.entradas:
lin.text = ''
def cria_tabela_servicos(self, dt):
for i in range(90):
for c in range(3):
if c == 1:
largura = 30
else:
largura = 15
self.serv = MDTextFieldRect(multiline=False, size_hint=(largura, .05), write_tab=False)
self.entradas.append(self.serv)
self.lista_serv[c].append(self.serv)
self.ids.grid_serv.add_widget(self.serv)
for i, n in enumerate(self.entradas):
if i % 3 == 0:
self.entradas[i].bind(on_text_validate=self.busca_dados_serv_clipboard)
self.entradas[i + 1].bind(focus=self.busca_dados_serv)
def busca_dados_serv(self, instance, widget):
serv_cad = pd.read_excel(os.path.join(self.manager.get_screen("pendentes").diretorio, 'cadastro.xlsx'),
sheet_name='servicos')
serv_cad = pd.DataFrame(serv_cad)
serv_cad['Nº de serviço'] = serv_cad['Nº de serviço'].astype(str)
for i, l in enumerate(self.entradas):
if l.text != '' and self.entradas[i + 1].text == '':
self.posicao = int(i / 3) if i > 0 else i
for index, row in serv_cad.iterrows():
if l.text == row['Nº de serviço']:
self.entradas[i + 1].text = serv_cad.loc[index, 'Denominação']
self.entradas[i + 2].text = str(int(serv_cad.loc[index, 'Classe avaliaç.']))
break
def busca_dados_serv_clipboard(self, instance):
for i, l in enumerate(self.entradas):
if l.text == '':
self.posicao = int(i / 3) if i > 0 else i
print(self.posicao)
break
serv_cad = pd.read_excel(os.path.join(self.manager.get_screen("pendentes").diretorio, 'cadastro.xlsx'),
sheet_name='servicos')
serv_cad = pd.DataFrame(serv_cad)
serv_cad['Nº de serviço'] = serv_cad['Nº de serviço'].astype(str)
win32clipboard.OpenClipboard()
rows = win32clipboard.GetClipboardData()
win32clipboard.EmptyClipboard()
win32clipboard.CloseClipboard()
rows = rows.split('\n')
        if len(rows) > 1:
            rows.pop()  # drop the trailing empty entry produced by the final newline
for r, val in enumerate(rows):
values = val.split('\t')
if len(values) > 1:
del values[1:]
for b, value in enumerate(values):
for index, row in serv_cad.iterrows():
self.lista_serv[b][r + self.posicao].text = value
if value == row['Nº de serviço']:
self.lista_serv[b + 1][r + self.posicao].text = serv_cad.loc[index, 'Denominação']
self.lista_serv[b + 2][r + self.posicao].text = str(int(serv_cad.loc[index, 'Classe avaliaç.']))
def busca_dados_lei_116(self):
if self.ids.cod_serv.text != '':
data_serv = pd.read_excel(os.path.join(self.manager.get_screen("pendentes").diretorio, 'cadastro.xlsx'),
sheet_name='116', dtype=str)
data_serv = | pd.DataFrame(data_serv) | pandas.DataFrame |
### import used modules first
from TPM.localization import select_folder
from glob import glob
import random
import string
import numpy as np
import os
import datetime
import pandas as pd
import scipy.linalg as la
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from numpy import unique
from numpy import where
from sklearn.datasets import make_classification
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score
### collect the analyzed sheets from each file into a dict of DataFrames, keyed by sheet name
## path_data: list of Excel file paths; sheet_names: list of sheet-name strings; axis=0 appends rows vertically
def get_df_dict(path_data, sheet_names, axis):
df_dict = dict()
for i, path in enumerate(path_data):
for sheet_name in sheet_names:
if i==0: ## initiate df_dict
df = pd.read_excel(path, sheet_name=sheet_name)
df_dict[f'{sheet_name}'] = df
else: ## append df_dict
df = pd.read_excel(path, sheet_name=sheet_name)
df_dict[f'{sheet_name}'] = | pd.concat([df_dict[f'{sheet_name}'], df], axis=axis) | pandas.concat |
from apiclient.discovery import build
import pandas as pd
import sys
from datetime import datetime
time = datetime.now().strftime('_%Y-%m-%d_%H_%M_%S')
# CREDENTIALS
DEVELOPER_KEY = "YOUR API KEY"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def youtube_search(q, max_results=50,order="relevance", token=None, location=None, location_radius=None):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,developerKey=DEVELOPER_KEY)
search_response = youtube.search().list(
q=q,
type="video",
pageToken=token,
order = order,
part="id,snippet",
maxResults=max_results,
location=location,
videoDuration = 'any',
locationRadius=location_radius).execute()
title = []
description = []
channelId = []
channelTitle = []
categoryId = []
videoId = []
viewCount = []
likeCount = []
dislikeCount = []
commentCount = []
favoriteCount = []
category = []
tags = []
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
title.append(search_result['snippet']['title'])
description.append(search_result['snippet']['description'])
videoId.append(search_result['id']['videoId'])
response = youtube.videos().list(
part='statistics, snippet',
id=search_result['id']['videoId']).execute()
            # append a placeholder when a field is missing so all lists stay the same length
            if 'channelId' in response['items'][0]['snippet'].keys():
                channelId.append(response['items'][0]['snippet']['channelId'])
            else:
                channelId.append(None)
            if 'channelTitle' in response['items'][0]['snippet'].keys():
                channelTitle.append(response['items'][0]['snippet']['channelTitle'])
            else:
                channelTitle.append(None)
            if 'categoryId' in response['items'][0]['snippet'].keys():
                categoryId.append(response['items'][0]['snippet']['categoryId'])
            else:
                categoryId.append(None)
            if 'favoriteCount' in response['items'][0]['statistics'].keys():
                favoriteCount.append(response['items'][0]['statistics']['favoriteCount'])
            else:
                favoriteCount.append(None)
            if 'viewCount' in response['items'][0]['statistics'].keys():
                viewCount.append(response['items'][0]['statistics']['viewCount'])
            else:
                viewCount.append(None)
            if 'likeCount' in response['items'][0]['statistics'].keys():
                likeCount.append(response['items'][0]['statistics']['likeCount'])
            else:
                likeCount.append(None)
            if 'dislikeCount' in response['items'][0]['statistics'].keys():
                dislikeCount.append(response['items'][0]['statistics']['dislikeCount'])
            else:
                dislikeCount.append(None)
            if 'commentCount' in response['items'][0]['statistics'].keys():
                commentCount.append(response['items'][0]['statistics']['commentCount'])
            else:
                commentCount.append([])
            if 'tags' in response['items'][0]['snippet'].keys():
                tags.append(response['items'][0]['snippet']['tags'])
            else:
                tags.append([])
youtube_dict = {'tags': tags, 'channelId': channelId, 'channelTitle': channelTitle,
'categoryId':categoryId, 'title': title, 'videoId': videoId,
'viewCount': viewCount, 'likeCount': likeCount,
'dislikeCount': dislikeCount, 'commentCount': commentCount,
'favoriteCount':favoriteCount, 'description': description}
return youtube_dict
df = pd.read_csv('CSV_FILE_HERE') # Example: "yt.csv"
links = list(df["video link"])
video_data = []
for i, link in enumerate(links):
    print("Video {} of {}".format(i + 1, len(links)))
    results = youtube_search(link)
    video_data.append(results)
df = | pd.DataFrame(data=video_data) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:37:53 2019
@author: sdenaro
"""
import pandas as pd
import numpy as np
def setup(year,operating_horizon,perfect_foresight):
#read generator parameters into DataFrame
df_gen = pd.read_csv('PNW_data_file/generators.csv',header=0)
zone = ['PNW']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zone]
df_load = df_load.loc[year*8760:year*8760+8759,:]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/PNW_dispatchable_hydro.csv',header=0)
##time series of wind generation for each zone
df_wind = pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0)
df_wind = df_wind.loc[:,'PNW']
df_wind = df_wind.loc[year*8760:year*8760+8759]
df_wind = df_wind.reset_index()
##time series solar for each TAC
df_solar = pd.read_csv('PNW_data_file/solar.csv',header=0)
##daily time series of dispatchable imports by path
df_imports = pd.read_csv('Path_setup/PNW_dispatchable_imports.csv',header=0)
##daily time series of dispatchable imports by path
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
df_imports3 = pd.read_csv('Path_setup/PNW_dispatchable_3.csv',header=0)
df_imports8 = pd.read_csv('Path_setup/PNW_dispatchable_8.csv',header=0)
df_imports14 = pd.read_csv('Path_setup/PNW_dispatchable_14.csv',header=0)
df_imports65 = pd.read_csv('Path_setup/PNW_dispatchable_65.csv',header=0)
df_imports66 = pd.read_csv('Path_setup/PNW_dispatchable_66.csv',header=0)
##hourly time series of exports by zone
df_exports3 = pd.read_csv('Path_setup/PNW_exports3.csv',header=0)
df_exports8 = pd.read_csv('Path_setup/PNW_exports8.csv',header=0)
df_exports14 = pd.read_csv('Path_setup/PNW_exports14.csv',header=0)
df_exports65 = | pd.read_csv('Path_setup/PNW_exports65.csv',header=0) | pandas.read_csv |
import numpy as np
#exec(open(r'D:\OneDrive\documents\Projects\trader\trendln\trendln\__init__.py').read())
def datefmt(xdate, cal=None):
from pandas.tseries.holiday import AbstractHolidayCalendar, Holiday, nearest_workday, \
USMartinLutherKingJr, USPresidentsDay, GoodFriday, USMemorialDay, \
USLaborDay, USThanksgivingDay
from pandas.tseries.offsets import CustomBusinessDay
class USTradingCalendar(AbstractHolidayCalendar):
rules = [
| Holiday('NewYearsDay', month=1, day=1, observance=nearest_workday) | pandas.tseries.holiday.Holiday |
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):
parser = all_parsers
data = """2,0,1
1000,2000,3000
4000,5000,6000"""
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
def test_usecols_with_parse_dates(all_parsers, usecols):
# see gh-9755
data = """a,b,c,d,e
0,1,20140101,0900,4
0,1,20140102,1000,4"""
parser = all_parsers
parse_dates = [[1, 2]]
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates2(all_parsers):
# see gh-13604
parser = all_parsers
data = """2008-02-07 09:40,1032.43
2008-02-07 09:50,1042.54
2008-02-07 10:00,1051.65"""
names = ["date", "values"]
usecols = names[:]
parse_dates = [0]
index = Index(
[
Timestamp("2008-02-07 09:40"),
Timestamp("2008-02-07 09:50"),
Timestamp("2008-02-07 10:00"),
],
name="date",
)
cols = {"values": [1032.43, 1042.54, 1051.65]}
expected = DataFrame(cols, index=index)
result = parser.read_csv(
StringIO(data),
parse_dates=parse_dates,
index_col=0,
usecols=usecols,
header=None,
names=names,
)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates3(all_parsers):
# see gh-14792
parser = all_parsers
data = """a,b,c,d,e,f,g,h,i,j
2016/09/21,1,1,2,3,4,5,6,7,8"""
usecols = list("abcdefghij")
parse_dates = [0]
cols = {
"a": Timestamp("2016-09-21"),
"b": [1],
"c": [1],
"d": [2],
"e": [3],
"f": [4],
"g": [5],
"h": [6],
"i": [7],
"j": [8],
}
expected = DataFrame(cols, columns=usecols)
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
def test_usecols_with_parse_dates4(all_parsers):
data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"
usecols = list("abcdefghij")
parse_dates = [[0, 1]]
parser = all_parsers
cols = {
"a_b": "2016/09/21 1",
"c": [1],
"d": [2],
"e": [3],
"f": [4],
"g": [5],
"h": [6],
"i": [7],
"j": [8],
}
expected = DataFrame(cols, columns=["a_b"] + list("cdefghij"))
result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])
@pytest.mark.parametrize(
"names",
[
list("abcde"), # Names span all columns in original data.
list("acd"), # Names span only the selected columns.
],
)
def test_usecols_with_parse_dates_and_names(all_parsers, usecols, names):
# see gh-9755
s = """0,1,20140101,0900,4
0,1,20140102,1000,4"""
parse_dates = [[1, 2]]
parser = all_parsers
cols = {
"a": [0, 0],
"c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],
}
expected = DataFrame(cols, columns=["c_d", "a"])
result = parser.read_csv(
StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols
)
tm.assert_frame_equal(result, expected)
def test_usecols_with_unicode_strings(all_parsers):
# see gh-13219
data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"AAA": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"BBB": {0: 8, 1: 2, 2: 7},
}
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), usecols=["AAA", "BBB"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_single_byte_unicode_strings(all_parsers):
# see gh-13219
data = """A,B,C,D
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"A": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"B": {0: 8, 1: 2, 2: 7},
}
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), usecols=["A", "B"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [["AAA", b"BBB"], [b"AAA", "BBB"]])
def test_usecols_with_mixed_encoding_strings(all_parsers, usecols):
data = """AAA,BBB,CCC,DDD
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [["あああ", "いい"], ["あああ", "いい"]])
def test_usecols_with_multi_byte_characters(all_parsers, usecols):
data = """あああ,いい,ううう,ええええ
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
exp_data = {
"あああ": {0: 0.056674972999999997, 1: 2.6132309819999997, 2: 3.5689350380000002},
"いい": {0: 8, 1: 2, 2: 7},
}
expected = DataFrame(exp_data)
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
def test_empty_usecols(all_parsers):
data = "a,b,c\n1,2,3\n4,5,6"
expected = DataFrame()
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=set())
tm.assert_frame_equal(result, expected)
def test_np_array_usecols(all_parsers):
# see gh-12546
parser = all_parsers
data = "a,b,c\n1,2,3"
usecols = np.array(["a", "b"])
expected = DataFrame([[1, 2]], columns=usecols)
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
(
lambda x: x.upper() in ["AAA", "BBB", "DDD"],
DataFrame(
{
"AaA": {
0: 0.056674972999999997,
1: 2.6132309819999997,
2: 3.5689350380000002,
},
"bBb": {0: 8, 1: 2, 2: 7},
"ddd": {0: "a", 1: "b", 2: "a"},
}
),
),
(lambda x: False, DataFrame()),
],
)
def test_callable_usecols(all_parsers, usecols, expected):
# see gh-14154
data = """AaA,bBb,CCC,ddd
0.056674973,8,True,a
2.613230982,2,False,b
3.568935038,7,False,a"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("usecols", [["a", "c"], lambda x: x in ["a", "c"]])
def test_incomplete_first_row(all_parsers, usecols):
# see gh-6710
data = "1,2\n1,2,3"
parser = all_parsers
names = ["a", "b", "c"]
expected = DataFrame({"a": [1, 1], "c": [np.nan, 3]})
result = parser.read_csv(StringIO(data), names=names, usecols=usecols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,usecols,kwargs,expected",
[
# see gh-8985
(
"19,29,39\n" * 2 + "10,20,30,40",
[0, 1, 2],
{"header": None},
| DataFrame([[19, 29, 39], [19, 29, 39], [10, 20, 30]]) | pandas.DataFrame |
import pandas as pd
import numpy as np
from statsmodels.discrete.discrete_model import Probit
#First regression table
def table2_reg(df_reg, disp_it):
"""Function to create the tables for the first probit models.
Args:
dataFrame containing the categorial variables as dummies and the interaction terms
disp_it boolean value indicating whether information about iterations should be displayed
Returns:
-------
A table containing the regression output of the first 4 model specifications.
"""
#first model
Y = df_reg['_oral']
X = df_reg[['sales', 'd1970', 'dsalesX1970', '_Phys', 'd_PhysX1970', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970']]
X['int'] = np.repeat(1, len(Y))
model1 = Probit(Y,X)
probit_model1 = model1.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model1.summary()) #got same results as paper
#compute margins (get_margeff)
probit_margeff1 = probit_model1.get_margeff()
#probit_margeff1.summary()
#second model
Y = df_reg['_oral']
X = df_reg[['sales', 'd1970', 'dsalesX1970', '_Phys', 'd_PhysX1970', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'any', 'anyX1970']]
X['int'] = np.repeat(1, len(Y))
model2 = Probit(Y,X)
probit_model2 = model2.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model2.summary()) #got same results as paper
#compute margins (get_margeff)
probit_margeff2 = probit_model2.get_margeff()
probit_margeff2.summary()
#third model
Y = df_reg['_oral']
X = df_reg[['sales', 'd1970', 'dsalesX1970', '_Phys', 'd_PhysX1970', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'any', 'anyX1970','d_agecat20', 'd_agecat25', 'd_agecat30', 'd_agecat35',\
'd_agecat20X1970', 'd_agecat25X1970', 'd_agecat30X1970', 'd_agecat35X1970','_Catholic' ,'_CatholicX1970',\
'd_ed_cat9', 'd_ed_cat12', 'd_ed_cat13', 'd_ed_cat16', 'd_ed_cat9X1970', 'd_ed_cat12X1970', 'd_ed_cat13X1970', \
'd_ed_cat16X1970', 'd_hinccat1', 'd_hinccat2', 'd_hinccat3', 'd_hinccat4', 'd_hinccat1X1970', 'd_hinccat2X1970',
'd_hinccat3X1970', 'd_hinccat4X1970']]
X['int'] = np.repeat(1, len(Y))
model3 = Probit(Y,X)
probit_model3 = model3.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model3.summary())
#compute margins (get_margeff)
probit_margeff3 = probit_model3.get_margeff()
#probit_margeff3.summary()
#fourth model
Y = df_reg['_oral']
X = df_reg[['sales', 'd1970', 'dsalesX1970', '_Phys', 'd_PhysX1970', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'any', 'anyX1970','d_agecat20', 'd_agecat25', 'd_agecat30', 'd_agecat35',\
'd_agecat20X1970', 'd_agecat25X1970', 'd_agecat30X1970', 'd_agecat35X1970','_Catholic' ,'_CatholicX1970',\
'd_ed_cat9', 'd_ed_cat12', 'd_ed_cat13', 'd_ed_cat16', 'd_ed_cat9X1970', 'd_ed_cat12X1970', 'd_ed_cat13X1970', \
'd_ed_cat16X1970', 'd_hinccat1', 'd_hinccat2', 'd_hinccat3', 'd_hinccat4', 'd_hinccat1X1970', 'd_hinccat2X1970',
'd_hinccat3X1970', 'd_hinccat4X1970', 'd_idealcat2', 'd_idealcat3', 'd_idealcat4', 'd_idealcat5', 'd_idealcat2X1970', \
'd_idealcat3X1970', 'd_idealcat4X1970', 'd_idealcat5X1970']]
X['int'] = np.repeat(1, len(Y))
model4 = Probit(Y,X)
probit_model4 = model4.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model4.summary())
#compute margins (get_margeff)
probit_margeff4 = probit_model4.get_margeff()
#print(probit_margeff4.summary())
table = pd.DataFrame({'(1)': [], '(2)': [], '(3)': [], '(4)': []})
table[' '] = ['Sales ban', '','p-value', 'Sales ban x 1(1970)', ' ','p-value', 'Observations', 'Log Likelihood', \
'Additional Covariates', 'Legal Variables']
table = table.set_index(' ')
table['(1)'] = [round(probit_margeff1.margeff[0],3), '({})'.format(round(probit_margeff1.margeff_se[0],3)), round(probit_margeff1.pvalues[0],3), round(probit_margeff1.margeff[2],3), \
'({})'.format(round(probit_margeff1.margeff_se[2],3)), round(probit_margeff1.pvalues[2],3), round(probit_margeff1.results.nobs,3), round(probit_margeff1.results.llf,3),\
'R','PX' ]
table['(2)'] = [round(probit_margeff2.margeff[0],3), '({})'.format(round(probit_margeff2.margeff_se[0],3)), round(probit_margeff2.pvalues[0],3), round(probit_margeff2.margeff[2],3), \
'({})'.format(round(probit_margeff2.margeff_se[2],3)), round(probit_margeff2.pvalues[2],3), round(probit_margeff2.results.nobs,3), round(probit_margeff2.results.llf,3),\
'R','PX, AD' ]
table['(3)'] = [round(probit_margeff3.margeff[0],3), '({})'.format(round(probit_margeff3.margeff_se[0],3)), round(probit_margeff3.pvalues[0],3), round(probit_margeff3.margeff[2],3), \
'({})'.format(round(probit_margeff3.margeff_se[2],3)), round(probit_margeff3.pvalues[2],3), round(probit_margeff3.results.nobs,3), round(probit_margeff3.results.llf,3),\
'R,A,C,E,I','PX, AD' ]
table['(4)'] = [round(probit_margeff4.margeff[0],3), '({})'.format(round(probit_margeff4.margeff_se[0],3)), round(probit_margeff4.pvalues[0],3), round(probit_margeff4.margeff[2],3), \
'({})'.format(round(probit_margeff4.margeff_se[2],3)), round(probit_margeff4.pvalues[2],3), round(probit_margeff4.results.nobs,3), round(probit_margeff4.results.llf,3),\
'R,A,C,E,I','PX, AD, K' ]
return table, model1, model2, model3, model4
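# Illustrative usage sketch (not part of the original replication code): it assumes the
# prepared regression data can be loaded into a DataFrame; the file name 'df_reg.dta'
# and the disp_it=False setting are hypothetical placeholders.
# df_reg = pd.read_stata('df_reg.dta')
# table2, m1, m2, m3, m4 = table2_reg(df_reg, disp_it=False)
# print(table2)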
#Second regression table
def table3_reg(df_reg, disp_it):
"""Function to create the tables for the second probit models.
Args:
dataFrame containing the categorial variables as dummies and the interaction terms
Returns:
-------
A table containing the regression output of the 8 model specifications for the second table.
"""
#1. _everuse_d as dependent variable
#first model
Y = df_reg['_everuse_d']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'dreg2X1965', 'dreg3X1965', 'dreg4X1965']]
X['int'] = np.repeat(1, len(Y))
model1 = Probit(Y,X)
probit_model1 = model1.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model1.summary()) #got same results as paper
#compute margins (get_margeff)
probit_margeff1 = probit_model1.get_margeff()
#probit_margeff1.summary()
#second model
Y = df_reg['_everuse_d']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'dreg2X1965', 'dreg3X1965', 'dreg4X1965', 'any', 'anyX1970', 'anyX1965']]
X['int'] = np.repeat(1, len(Y))
model2 = Probit(Y,X)
probit_model2 = model2.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model2.summary()) #got same results as paper
#compute margins (get_margeff)
probit_margeff2 = probit_model2.get_margeff()
probit_margeff2.summary()
#third model
Y = df_reg['_everuse_d']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970','dreg2X1965', 'dreg3X1965', 'dreg4X1965', 'any', 'anyX1970',
'anyX1965','d_agecat20', 'd_agecat25', 'd_agecat30', 'd_agecat35', 'd_agecat20X1970', \
'd_agecat25X1970', 'd_agecat30X1970', 'd_agecat35X1970','d_agecat20X1965', 'd_agecat25X1965', \
'd_agecat30X1965', 'd_agecat35X1965','_Catholic' ,'_CatholicX1970', '_CatholicX1965',\
'd_ed_cat9', 'd_ed_cat12', 'd_ed_cat13', 'd_ed_cat16', 'd_ed_cat9X1970', 'd_ed_cat12X1970', \
'd_ed_cat13X1970', 'd_ed_cat9X1965', 'd_ed_cat12X1965', 'd_ed_cat13X1965', \
'd_ed_cat16X1970','d_ed_cat16X1965', 'd_hinccat1', 'd_hinccat2', 'd_hinccat3', 'd_hinccat4', \
'd_hinccat1X1970', 'd_hinccat2X1970', \
'd_hinccat3X1970', 'd_hinccat4X1970', 'd_hinccat1X1965', 'd_hinccat2X1965', 'd_hinccat3X1965', \
'd_hinccat4X1965']]
X['int'] = np.repeat(1, len(Y))
model3 = Probit(Y,X)
probit_model3 = model3.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model3.summary())
#compute margins (get_margeff)
probit_margeff3 = probit_model3.get_margeff()
probit_margeff3.summary()
#fourth model
Y = df_reg['_everuse_d']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970','dreg2X1965', 'dreg3X1965', 'dreg4X1965', 'any', 'anyX1970',
'anyX1965','d_agecat20', 'd_agecat25', 'd_agecat30', 'd_agecat35', 'd_agecat20X1970', \
'd_agecat25X1970', 'd_agecat30X1970', 'd_agecat35X1970','d_agecat20X1965', 'd_agecat25X1965', \
'd_agecat30X1965', 'd_agecat35X1965','_Catholic' ,'_CatholicX1970', '_CatholicX1965',\
'd_ed_cat9', 'd_ed_cat12', 'd_ed_cat13', 'd_ed_cat16', 'd_ed_cat9X1970', 'd_ed_cat12X1970', \
'd_ed_cat13X1970', 'd_ed_cat9X1965', 'd_ed_cat12X1965', 'd_ed_cat13X1965', \
'd_ed_cat16X1970','d_ed_cat16X1965', 'd_hinccat1', 'd_hinccat2', 'd_hinccat3', 'd_hinccat4', \
'd_hinccat1X1970', 'd_hinccat2X1970', \
'd_hinccat3X1970', 'd_hinccat4X1970', 'd_hinccat1X1965', 'd_hinccat2X1965', 'd_hinccat3X1965', \
'd_hinccat4X1965', 'd_idealcat2', 'd_idealcat3', 'd_idealcat4', 'd_idealcat5', 'd_idealcat2X1970', \
'd_idealcat3X1970', 'd_idealcat4X1970', 'd_idealcat5X1970', 'd_idealcat2X1965', \
'd_idealcat3X1965', 'd_idealcat4X1965', 'd_idealcat5X1965']]
X['int'] = np.repeat(1, len(Y))
model4 = Probit(Y,X)
probit_model4 = model4.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model4.summary())
#compute margins (get_margeff)
probit_margeff4 = probit_model4.get_margeff()
probit_margeff4.summary()
#store results
model1_help = model1
model2_help = model2
model3_help = model3
model4_help = model3
#2. _barrier as dependent variable
#first model
Y = df_reg['_barrier']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'dreg2X1965', 'dreg3X1965', 'dreg4X1965']]
X['int'] = np.repeat(1, len(Y))
model1 = Probit(Y,X)
probit_model1 = model1.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model1.summary()) #got same results as paper
#compute margins (get_margeff)
probit_margeffb1 = probit_model1.get_margeff()
probit_margeffb1.summary()
#second model
Y = df_reg['_barrier']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970', 'dreg2X1965', 'dreg3X1965', 'dreg4X1965', 'any', 'anyX1970', 'anyX1965']]
X['int'] = np.repeat(1, len(Y))
model2 = Probit(Y,X)
probit_model2 = model2.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model2.summary()) #got same results as paper
#compute margins (get_margeff)
probit_margeffb2 = probit_model2.get_margeff()
probit_margeffb2.summary()
#third model
Y = df_reg['_barrier']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970','dreg2X1965', 'dreg3X1965', 'dreg4X1965', 'any', 'anyX1970',
'anyX1965','d_agecat20', 'd_agecat25', 'd_agecat30', 'd_agecat35', 'd_agecat20X1970', \
'd_agecat25X1970', 'd_agecat30X1970', 'd_agecat35X1970','d_agecat20X1965', 'd_agecat25X1965', \
'd_agecat30X1965', 'd_agecat35X1965','_Catholic' ,'_CatholicX1970', '_CatholicX1965',\
'd_ed_cat9', 'd_ed_cat12', 'd_ed_cat13', 'd_ed_cat16', 'd_ed_cat9X1970', 'd_ed_cat12X1970', \
'd_ed_cat13X1970', 'd_ed_cat9X1965', 'd_ed_cat12X1965', 'd_ed_cat13X1965', \
'd_ed_cat16X1970','d_ed_cat16X1965', 'd_hinccat1', 'd_hinccat2', 'd_hinccat3', 'd_hinccat4', \
'd_hinccat1X1970', 'd_hinccat2X1970', \
'd_hinccat3X1970', 'd_hinccat4X1970', 'd_hinccat1X1965', 'd_hinccat2X1965', 'd_hinccat3X1965', \
'd_hinccat4X1965']]
X['int'] = np.repeat(1, len(Y))
model3 = Probit(Y,X)
probit_model3 = model3.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model3.summary())
#compute margins (get_margeff)
probit_margeffb3 = probit_model3.get_margeff()
probit_margeffb3.summary()
#fourth model
Y = df_reg['_barrier']
X = df_reg[['sales', 'd1970','d1965', 'dsalesX1970','dsalesX1965', '_Phys', 'd_PhysX1970', 'd_PhysX1965', 'dreg2', 'dreg3', 'dreg4', \
'dreg2X1970', 'dreg3X1970', 'dreg4X1970','dreg2X1965', 'dreg3X1965', 'dreg4X1965', 'any', 'anyX1970',
'anyX1965','d_agecat20', 'd_agecat25', 'd_agecat30', 'd_agecat35', 'd_agecat20X1970', \
'd_agecat25X1970', 'd_agecat30X1970', 'd_agecat35X1970','d_agecat20X1965', 'd_agecat25X1965', \
'd_agecat30X1965', 'd_agecat35X1965','_Catholic' ,'_CatholicX1970', '_CatholicX1965',\
'd_ed_cat9', 'd_ed_cat12', 'd_ed_cat13', 'd_ed_cat16', 'd_ed_cat9X1970', 'd_ed_cat12X1970', \
'd_ed_cat13X1970', 'd_ed_cat9X1965', 'd_ed_cat12X1965', 'd_ed_cat13X1965', \
'd_ed_cat16X1970','d_ed_cat16X1965', 'd_hinccat1', 'd_hinccat2', 'd_hinccat3', 'd_hinccat4', \
'd_hinccat1X1970', 'd_hinccat2X1970', \
'd_hinccat3X1970', 'd_hinccat4X1970', 'd_hinccat1X1965', 'd_hinccat2X1965', 'd_hinccat3X1965', \
'd_hinccat4X1965', 'd_idealcat2', 'd_idealcat3', 'd_idealcat4', 'd_idealcat5', 'd_idealcat2X1970', \
'd_idealcat3X1970', 'd_idealcat4X1970', 'd_idealcat5X1970', 'd_idealcat2X1965', \
'd_idealcat3X1965', 'd_idealcat4X1965', 'd_idealcat5X1965']]
X['int'] = np.repeat(1, len(Y))
model4 = Probit(Y,X)
probit_model4 = model4.fit(cov_type='cluster', cov_kwds={'groups': df_reg['_region']}, disp = disp_it)
#print(probit_model4.summary())
#compute margins (get_margeff)
probit_margeffb4 = probit_model4.get_margeff()
probit_margeffb4.summary()
#3. create table for output
table = | pd.DataFrame({'(1)': [], '(2)': [], '(3)': [], '(4)': []}) | pandas.DataFrame |
import filecmp
import os
import pandas as pd
import pytest
import sas7bdat_converter.converter as converter
import shutil
import xlrd
from pathlib import Path
from glob import glob
current_dir = Path().absolute()
def test_batch_to_csv(tmpdir, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = Path(tmpdir).joinpath('file1.csv')
converted_file_2 = Path(tmpdir).joinpath('file2.csv')
converted_file_3 = Path(tmpdir).joinpath('file3.csv')
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3},
]
converter.batch_to_csv(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
assert files_created
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.csv'}],
[{'sas7bdat_file': 'test.sas7bdat', 'bad_key': 'test.csv'}],
[{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': 'test.csv'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_csv_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_csv(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
def test_batch_to_excel(tmpdir, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = Path(tmpdir).joinpath('file1.xlsx')
converted_file_2 = Path(tmpdir).joinpath('file2.xlsx')
converted_file_3 = Path(tmpdir).joinpath('file3.xlsx')
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3},
]
converter.batch_to_excel(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
assert(files_created)
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.xlsx'}],
    [{'sas7bdat_file': 'test.sas7bdat', 'bad_key': 'test.xlsx'}],
    [{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': 'test.xlsx'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_excel_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_excel(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
def test_batch_to_json(tmpdir, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = Path(tmpdir).joinpath('file1.json')
converted_file_2 = Path(tmpdir).joinpath('file2.json')
converted_file_3 = Path(tmpdir).joinpath('file3.json')
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3},
]
converter.batch_to_json(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
assert(files_created)
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.json'}],
[{'sas7bdat_file': 'test.sas7bdat', 'bad_key': 'test.json'}],
[{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': 'test.json'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_json_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_json(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
optionals = [
{},
{'root_node': 'root'},
{'first_node': 'item'},
{'root_node': 'root', 'first_node': 'item'},
]
@pytest.mark.parametrize('optional', optionals)
def test_batch_to_xml(tmpdir, sas_file_1, sas_file_2, sas_file_3, optional):
converted_file_1 = Path(tmpdir).joinpath('file1.xml')
converted_file_2 = Path(tmpdir).joinpath('file2.xml')
converted_file_3 = Path(tmpdir).joinpath('file3.xml')
if optional.get('root_node') and optional.get('first_node'):
file_dict = [
{
'sas7bdat_file': sas_file_1,
'export_file': converted_file_1,
'root_node': optional.get('root_node'),
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_2,
'export_file': converted_file_2,
'root_node': optional.get('root_node'),
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_3,
'export_file': converted_file_3,
'root_node': optional.get('root_node'),
'first_node': optional.get('first_node'),
},
]
elif optional.get('root_node'):
file_dict = [
{
'sas7bdat_file': sas_file_1,
'export_file': converted_file_1,
'root_node': optional.get('root_node'),
},
{
'sas7bdat_file': sas_file_2,
'export_file': converted_file_2,
'root_node': optional.get('root_node'),
},
{
'sas7bdat_file': sas_file_3,
'export_file': converted_file_3,
'root_node': optional.get('root_node'),
},
]
elif optional.get('first_node'):
file_dict = [
{
'sas7bdat_file': sas_file_1,
'export_file': converted_file_1,
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_2,
'export_file': converted_file_2,
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_3,
'export_file': converted_file_3,
'first_node': optional.get('first_node'),
},
]
else:
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1,},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2,},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3,},
]
converter.batch_to_xml(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
assert(files_created)
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.xml'}],
    [{'sas7bdat_file': 'test.sas7bdat', 'bad_key': 'test.xml'}],
    [{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': 'test.xml'}],
[{'sas7bdat_file': 'test.sas7bdat', 'export_file': 'test.xml', 'root_node': 'test', 'bad': 'test'}],
[{'sas7bdat_file': 'test.sas7bdat', 'export_file': 'test.xml', 'bad': 'test', 'first_node': 'test'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_xml_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_xml(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
def test_dir_to_csv_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_csv(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.csv'])
assert sas_counter == convert_counter
def test_dir_to_csv_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_csv(dir_path=str(sas7bdat_dir), export_path=str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.csv'])
assert sas_counter == convert_counter
def test_dir_to_excel_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_excel(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xlsx'])
assert sas_counter == convert_counter
def test_dir_to_excel_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_excel(dir_path=str(sas7bdat_dir), export_path=str(tmpdir))
sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == 'sas7bdat'])
convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == 'xlsx'])
assert sas_counter == convert_counter
def test_dir_to_json_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_json(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.json'])
assert sas_counter == convert_counter
def test_dir_to_json_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_json(str(sas7bdat_dir), str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.json'])
assert sas_counter == convert_counter
def test_dir_to_xml_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_xml(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xml'])
assert sas_counter == convert_counter
def test_dir_to_xml_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_xml(str(sas7bdat_dir), str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xml'])
assert sas_counter == convert_counter
exception_data = [
('sas7bdat conversion error - Valid extension for to_csv conversion is: .csv', ['.csv'], 'to_csv'),
('sas7bdat conversion error - Valid extensions for to_csv conversion are: .csv, .txt', ['.csv', '.txt'], 'to_csv'),
]
@pytest.mark.parametrize('exception', exception_data)
def test_file_extension_exception_message(exception):
valid_message = exception[0]
valid_extensions = exception[1]
test_message = converter._file_extension_exception_message(exception[2], valid_extensions)
assert valid_message == test_message
def test_invalid_key_exception_message_no_optional():
valid_message = 'Invalid key provided, expected keys are: sas7bdat_file, export_file'
required_keys = ['sas7bdat_file', 'export_file']
test_message = converter._invalid_key_exception_message(required_keys=required_keys)
assert valid_message == test_message
def test_invalid_key_exception_message_optional():
valid_message = 'Invalid key provided, expected keys are: sas7bdat_file, export_file and optional keys are: root_node, first_node'
required_keys = ['sas7bdat_file', 'export_file']
optional_keys = ['root_node', 'first_node']
test_message = converter._invalid_key_exception_message(required_keys=required_keys, optional_keys=optional_keys)
assert valid_message == test_message
@pytest.mark.parametrize('data', [
(('.txt', '.csv',), '.xml'),
(('.sas7bdat',), '.json'),
])
def test_is_valid_extension_false(data):
valid_extensions = data[0]
file_extension = data[1]
assert not converter._is_valid_extension(valid_extensions, file_extension)
@pytest.mark.parametrize('data', [
(('.txt', '.csv',), '.csv'),
(('.sas7bdat',), '.sas7bdat'),
])
def test_is_valid_extension_true(data):
valid_extensions = data[0]
file_extension = data[1]
assert converter._is_valid_extension(valid_extensions, file_extension)
# parametrize over the sas file fixtures; a pytest.fixture decorator here would turn the
# function into a fixture and keep it from being collected as a test
@pytest.mark.parametrize('sas_file_name', ['sas_file_1', 'sas_file_2', 'sas_file_3'])
def test_to_csv(tmpdir, request, expected_dir, sas_file_name):
    sas_file = request.getfixturevalue(sas_file_name)
converted_file = Path(tmpdir).joinpath('file1.csv')
expected_file = expected_dir.joinpath('file1.csv')
converter.to_csv(sas_file, converted_file)
assert filecmp.cmp(converted_file, expected_file, shallow=False)
def test_to_csv_invalid_extension():
with pytest.raises(AttributeError) as execinfo:
converter.to_csv('test.sas7bdat', 'test.bad')
assert 'sas7bdat conversion error - Valid extension' in str(execinfo.value)
def test_to_dataframe(sas_file_1):
d = {
'integer_row': [1.0, 2.0, 3.0, 4.0, 5.0,],
'text_row': [
'Some text',
'Some more text',
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc lobortis, risus nec euismod condimentum, lectus ligula porttitor massa, vel ornare mauris arcu vel augue. Maecenas rhoncus consectetur nisl, ac convallis enim pellentesque efficitur. Praesent tristique . End of textlectus a dolor sodales, in porttitor felis auctor. Etiam dui mauris, commodo at venenatis eu, lacinia nec tellus. Curabitur dictum tincidunt convallis. Duis vestibulum mauris quis felis euismod bibendum. Nulla eget nunc arcu. Nam quis est urna. In eleifend ultricies ultrices. In lacinia auctor ex, sed commodo nisl fringilla sed. Fusce iaculis viverra eros, nec elementum velit aliquam non. Aenean sollicitudin consequat libero, eget mattis.',
'Text',
'Test',
],
'float_row': [2.5, 17.23, 3.21, 100.9, 98.6,],
'date_row': ['2018-01-02', '2018-02-05', '2017-11-21', '2016-05-19', '1999-10-25',]
}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
# pylint: disable-msg=E1101,W0612
from datetime import datetime, time, timedelta, date
import sys
import os
import operator
from distutils.version import LooseVersion
import nose
import numpy as np
randn = np.random.randn
from pandas import (Index, Series, TimeSeries, DataFrame,
isnull, date_range, Timestamp, Period, DatetimeIndex,
Int64Index, to_datetime, bdate_range, Float64Index)
import pandas.core.datetools as datetools
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
import pandas.tseries.frequencies as fmod
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from pandas.tslib import NaT, iNaT
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.index as _index
from pandas.compat import range, long, StringIO, lrange, lmap, zip, product
import pandas.core.datetools as dt
from numpy.random import rand
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
import pandas.compat as compat
import pandas.core.common as com
from pandas import concat
from pandas import _np_version_under1p7
from numpy.testing.decorators import slow
def _skip_if_no_pytz():
try:
import pytz
except ImportError:
raise nose.SkipTest("pytz not installed")
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
raise nose.SkipTest("Specific locale is set {0}".format(lang))
class TestTimeSeriesDuplicates(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
tm.assert_isinstance(self.dups, TimeSeries)
tm.assert_isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
self.assertFalse(self.dups.index.is_unique)
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
self.assertEqual(uniques.dtype, 'M8[ns]') # sanity
self.assertTrue(uniques.equals(expected))
self.assertEqual(self.dups.index.nunique(), 4)
# #2563
self.assertTrue(isinstance(uniques, DatetimeIndex))
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, tz='US/Eastern')
self.assertTrue(result.tz is not None)
self.assertEqual(result.name, 'foo')
self.assertTrue(result.equals(expected))
# NaT
arr = [ 1370745748 + t for t in range(20) ] + [iNaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
arr = [ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT]
idx = DatetimeIndex(arr * 3)
self.assertTrue(idx.unique().equals(DatetimeIndex(arr)))
self.assertEqual(idx.nunique(), 21)
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
self.assertTrue(d in ix)
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
self.assertRaises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000,1,6)] = 0
self.assertEqual(ts[datetime(2000,1,6)], 0)
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
self.assertIn(timestamp, df.index)
# it works!
df.ix[timestamp]
self.assertTrue(len(df.ix[[timestamp]]) > 0)
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(randn(len(rng)), index=rng)
ts2 = concat([ts[0:4],ts[-4:],ts[4:-4]])
for t in ts.index:
s = str(t)
expected = ts[t]
result = ts2[t]
self.assertTrue(expected == result)
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result,expected)
compare(slice('2011-01-01','2011-01-15'))
compare(slice('2010-12-30','2011-01-15'))
compare(slice('2011-01-01','2011-01-16'))
# partial ranges
compare(slice('2011-01-01','2011-01-6'))
compare(slice('2011-01-06','2011-01-8'))
compare(slice('2011-01-06','2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result,expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
self.assertTrue(t.year == 2005)
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)),index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
df = DataFrame(dict(A = ts))
result = df['2001']['A']
assert_series_equal(expected,result)
# setting
ts['2001'] = 1
expected = ts['2001']
df.loc['2001','A'] = 1
result = df['2001']['A']
assert_series_equal(expected,result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00', freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59', freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected,ts)
idx = [ Timestamp('2013-05-31 00:00'), Timestamp(datetime(2013,5,31,23,59,59,999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected,ts)
# GH 3925, indexing with a seconds resolution string / datetime object
df = DataFrame(randn(5,5),columns=['open','high','low','close','volume'],index=date_range('2012-01-02 18:01:00',periods=5,tz='US/Central',freq='s'))
expected = df.loc[[df.index[2]]]
result = df['2012-01-02 18:01:02']
assert_frame_equal(result,expected)
# this is a single date, so will raise
self.assertRaises(KeyError, df.__getitem__, df.index[2],)
def test_recreate_from_data(self):
if _np_version_under1p7:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'A', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N', 'C']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, periods=1)
idx = DatetimeIndex(org, freq=f)
self.assertTrue(idx.equals(org))
# unbale to create tz-aware 'A' and 'C' freq
if _np_version_under1p7:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H']
else:
freqs = ['M', 'Q', 'D', 'B', 'T', 'S', 'L', 'U', 'H', 'N']
for f in freqs:
org = DatetimeIndex(start='2001/02/01 09:00', freq=f, tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=f, tz='US/Pacific')
self.assertTrue(idx.equals(org))
def assert_range_equal(left, right):
assert(left.equals(right))
assert(left.freq == right.freq)
assert(left.tz == right.tz)
class TestTimeSeries(tm.TestCase):
_multiprocess_can_split_ = True
def test_is_(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
self.assertTrue(dti.is_(dti))
self.assertTrue(dti.is_(dti.view()))
self.assertFalse(dti.is_(dti.copy()))
def test_dti_slicing(self):
dti = DatetimeIndex(start='1/1/2005', end='12/1/2005', freq='M')
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
self.assertEqual(v1, Timestamp('2/28/2005'))
self.assertEqual(v2, Timestamp('4/30/2005'))
self.assertEqual(v3, Timestamp('6/30/2005'))
# don't carry freq through irregular slicing
self.assertIsNone(dti2.freq)
def test_pass_datetimeindex_to_index(self):
# Bugs in #1396
rng = date_range('1/1/2000', '3/1/2000')
idx = Index(rng, dtype=object)
expected = Index(rng.to_pydatetime(), dtype=object)
self.assert_numpy_array_equal(idx.values, expected.values)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
self.assertIsNotNone(expected.freq)
assert_range_equal(masked, expected)
mask[22] = True
masked = rng[mask]
self.assertIsNone(masked.freq)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_series_box_timestamp(self):
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng)
tm.assert_isinstance(s[5], Timestamp)
rng = date_range('20090415', '20090519', freq='B')
s = Series(rng, index=rng)
tm.assert_isinstance(s[5], Timestamp)
tm.assert_isinstance(s.iget_value(5), Timestamp)
def test_date_range_ambiguous_arguments(self):
# #2538
start = datetime(2011, 1, 1, 5, 3, 40)
end = datetime(2011, 1, 1, 8, 9, 40)
self.assertRaises(ValueError, date_range, start, end,
freq='s', periods=10)
def test_timestamp_to_datetime(self):
_skip_if_no_pytz()
rng = date_range('20090415', '20090519',
tz='US/Eastern')
stamp = rng[0]
dtval = stamp.to_pydatetime()
self.assertEqual(stamp, dtval)
self.assertEqual(stamp.tzinfo, dtval.tzinfo)
def test_index_convert_to_datetime_array(self):
_skip_if_no_pytz()
def _check_rng(rng):
converted = rng.to_pydatetime()
tm.assert_isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
tm.assert_isinstance(x, datetime)
self.assertEqual(x, stamp.to_pydatetime())
self.assertEqual(x.tzinfo, stamp.tzinfo)
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
self.assertEqual(rng[0].second, 1)
def test_series_ctor_plus_datetimeindex(self):
rng = date_range('20090415', '20090519', freq='B')
data = dict((k, 1) for k in rng)
result = Series(data, index=rng)
self.assertIs(result.index, rng)
def test_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
result = s[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected[-3:] = np.nan
assert_series_equal(result, expected)
result = s[-2:].reindex(index)
result = result.fillna(method='bfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected[:3] = np.nan
assert_series_equal(result, expected)
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_setitem_timestamp(self):
# 2155
columns = DatetimeIndex(start='1/1/2012', end='2/1/2012',
freq=datetools.bday)
index = lrange(10)
data = DataFrame(columns=columns, index=index)
t = datetime(2012, 11, 1)
ts = Timestamp(t)
data[ts] = np.nan # works
def test_sparse_series_fillna_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
ss = s[:2].reindex(index).to_sparse()
result = ss.fillna(method='pad', limit=5)
expected = ss.fillna(method='pad', limit=5)
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
ss = s[-2:].reindex(index).to_sparse()
result = ss.fillna(method='backfill', limit=5)
expected = ss.fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_series_pad_backfill_limit(self):
index = np.arange(10)
s = Series(np.random.randn(10), index=index)
s = s.to_sparse()
result = s[:2].reindex(index, method='pad', limit=5)
expected = s[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected[-3:] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
result = s[-2:].reindex(index, method='backfill', limit=5)
expected = s[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected[:3] = np.nan
expected = expected.to_sparse()
assert_series_equal(result, expected)
def test_sparse_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index, method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index, method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_sparse_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
sdf = df.to_sparse()
result = sdf[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = sdf[:2].reindex(index).fillna(method='pad')
expected = expected.to_dense()
expected.values[-3:] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
result = sdf[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = sdf[-2:].reindex(index).fillna(method='backfill')
expected = expected.to_dense()
expected.values[:3] = np.nan
expected = expected.to_sparse()
tm.assert_frame_equal(result, expected)
def test_pad_require_monotonicity(self):
rng = date_range('1/1/2000', '3/1/2000', freq='B')
rng2 = rng[::2][::-1]
self.assertRaises(ValueError, rng2.get_indexer, rng,
method='pad')
def test_frame_ctor_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
df = DataFrame({'A': np.random.randn(len(rng)), 'B': dates})
self.assertTrue(np.issubdtype(df['B'].dtype, np.dtype('M8[ns]')))
def test_frame_add_datetime64_column(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
df = DataFrame(index=np.arange(len(rng)))
df['A'] = rng
self.assertTrue(np.issubdtype(df['A'].dtype, np.dtype('M8[ns]')))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({'year': date_range('1/1/1700', periods=50,
freq='A-DEC')})
# it works!
repr(df)
def test_frame_add_datetime64_col_other_units(self):
n = 100
units = ['h', 'm', 's', 'ms', 'D', 'M', 'Y']
ns_dtype = np.dtype('M8[ns]')
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertEqual(df[unit].dtype, ns_dtype)
self.assertTrue((df[unit].values == ex_vals).all())
# Test insertion into existing datetime64 column
df = DataFrame({'ints': np.arange(n)}, index=np.arange(n))
df['dates'] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype('M8[%s]' % unit)
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp['dates'] = vals
ex_vals = to_datetime(vals.astype('O'))
self.assertTrue((tmp['dates'].values == ex_vals).all())
def test_to_datetime_unit(self):
epoch = 1370745748
s = Series([ epoch + t for t in range(20) ])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT])
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = Series([ epoch + t for t in range(20) ] + [iNaT]).astype(float)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
s = concat([Series([ epoch + t for t in range(20) ]).astype(float),Series([np.nan])],ignore_index=True)
result = to_datetime(s,unit='s')
expected = Series([ Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t) for t in range(20) ] + [NaT])
assert_series_equal(result,expected)
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50',
freq='10s')
dates = np.asarray(rng)
series = Series(dates)
self.assertTrue(np.issubdtype(series.dtype, np.dtype('M8[ns]')))
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
self.assertTrue((idx.values == tslib.cast_to_nanoseconds(arr)).all())
def test_index_astype_datetime64(self):
idx = Index([datetime(2012, 1, 1)], dtype=object)
if not _np_version_under1p7:
raise nose.SkipTest("test only valid in numpy < 1.7")
casted = idx.astype(np.dtype('M8[D]'))
expected = DatetimeIndex(idx.values)
tm.assert_isinstance(casted, DatetimeIndex)
self.assertTrue(casted.equals(expected))
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
self.assertTrue(np.issubdtype(result.dtype, np.dtype('M8[ns]')))
mask = result.isnull()
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
self.assertTrue(np.issubdtype(result['B'].dtype, np.dtype('M8[ns]')))
mask = com.isnull(result)['B']
self.assertTrue(mask[-5:].all())
self.assertFalse(mask[:-5].any())
def test_series_repr_nat(self):
series = Series([0, 1000, 2000, iNaT], dtype='M8[ns]')
result = repr(series)
expected = ('0 1970-01-01 00:00:00\n'
'1 1970-01-01 00:00:00.000001\n'
'2 1970-01-01 00:00:00.000002\n'
'3 NaT\n'
'dtype: datetime64[ns]')
self.assertEqual(result, expected)
def test_fillna_nat(self):
series = Series([0, 1, 2, iNaT], dtype='M8[ns]')
filled = series.fillna(method='pad')
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='pad')
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
series = Series([iNaT, 0, 1, 2], dtype='M8[ns]')
filled = series.fillna(method='bfill')
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
assert_series_equal(filled, expected)
assert_series_equal(filled2, expected)
df = DataFrame({'A': series})
filled = df.fillna(method='bfill')
filled2 = df.fillna(value=series[1])
expected = DataFrame({'A': expected})
assert_frame_equal(filled, expected)
assert_frame_equal(filled2, expected)
def test_string_na_nat_conversion(self):
# GH #999, #858
from pandas.compat import parse_date
strings = np.array(['1/1/2000', '1/2/2000', np.nan,
'1/4/2000, 12:34:56'], dtype=object)
expected = np.empty(4, dtype='M8[ns]')
for i, val in enumerate(strings):
if com.isnull(val):
expected[i] = iNaT
else:
expected[i] = parse_date(val)
result = tslib.array_to_datetime(strings)
assert_almost_equal(result, expected)
result2 = to_datetime(strings)
tm.assert_isinstance(result2, DatetimeIndex)
assert_almost_equal(result, result2)
malformed = np.array(['1/100/2000', np.nan], dtype=object)
result = to_datetime(malformed)
assert_almost_equal(result, malformed)
self.assertRaises(ValueError, to_datetime, malformed,
errors='raise')
idx = ['a', 'b', 'c', 'd', 'e']
series = Series(['1/1/2000', np.nan, '1/3/2000', np.nan,
'1/5/2000'], index=idx, name='foo')
dseries = Series([to_datetime('1/1/2000'), np.nan,
to_datetime('1/3/2000'), np.nan,
to_datetime('1/5/2000')], index=idx, name='foo')
result = to_datetime(series)
dresult = to_datetime(dseries)
expected = Series(np.empty(5, dtype='M8[ns]'), index=idx)
for i in range(5):
x = series[i]
if isnull(x):
expected[i] = iNaT
else:
expected[i] = to_datetime(x)
assert_series_equal(result, expected)
self.assertEqual(result.name, 'foo')
assert_series_equal(dresult, expected)
self.assertEqual(dresult.name, 'foo')
def test_to_datetime_iso8601(self):
result = to_datetime(["2012-01-01 00:00:00"])
exp = Timestamp("2012-01-01 00:00:00")
self.assertEqual(result[0], exp)
result = to_datetime(['20121001']) # bad iso 8601
exp = Timestamp('2012-10-01')
self.assertEqual(result[0], exp)
def test_to_datetime_default(self):
rs = to_datetime('2001')
xp = datetime(2001, 1, 1)
self.assertTrue(rs, xp)
#### dayfirst is essentially broken
#### to_datetime('01-13-2012', dayfirst=True)
#### self.assertRaises(ValueError, to_datetime('01-13-2012', dayfirst=True))
def test_to_datetime_on_datetime64_series(self):
# #2699
s = Series(date_range('1/1/2000', periods=10))
result = to_datetime(s)
self.assertEqual(result[0], s[0])
def test_to_datetime_with_apply(self):
# this is only locale tested with US/None locales
_skip_if_has_locale()
# GH 5195
# with a format and coerce a single item to_datetime fails
td = Series(['May 04', 'Jun 02', 'Dec 11'], index=[1,2,3])
expected = pd.to_datetime(td, format='%b %y')
result = td.apply(pd.to_datetime, format='%b %y')
assert_series_equal(result, expected)
td = pd.Series(['May 04', 'Jun 02', ''], index=[1,2,3])
self.assertRaises(ValueError, lambda : pd.to_datetime(td,format='%b %y'))
self.assertRaises(ValueError, lambda : td.apply(pd.to_datetime, format='%b %y'))
expected = pd.to_datetime(td, format='%b %y', coerce=True)
result = td.apply(lambda x: pd.to_datetime(x, format='%b %y', coerce=True))
assert_series_equal(result, expected)
def test_nat_vector_field_access(self):
idx = DatetimeIndex(['1/1/2000', None, None, '1/4/2000'])
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else -1
for x in idx]
self.assert_numpy_array_equal(result, expected)
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
'week', 'dayofyear']
for field in fields:
result = getattr(NaT, field)
self.assertEqual(result, -1)
self.assertEqual(NaT.weekday(), -1)
def test_to_datetime_types(self):
# empty string
result = to_datetime('')
self.assertIs(result, NaT)
result = to_datetime(['', ''])
self.assertTrue(isnull(result).all())
# ints
result = Timestamp(0)
expected = to_datetime(0)
self.assertEqual(result, expected)
# GH 3888 (strings)
expected = to_datetime(['2012'])[0]
result = to_datetime('2012')
self.assertEqual(result, expected)
### array = ['2012','20120101','20120101 12:01:01']
array = ['20120101','20120101 12:01:01']
expected = list(to_datetime(array))
result = lmap(Timestamp,array)
tm.assert_almost_equal(result,expected)
### currently fails ###
### result = Timestamp('2012')
### expected = to_datetime('2012')
### self.assertEqual(result, expected)
def test_to_datetime_unprocessable_input(self):
# GH 4928
self.assert_numpy_array_equal(
to_datetime([1, '1']),
np.array([1, '1'], dtype='O')
)
self.assertRaises(TypeError, to_datetime, [1, '1'], errors='raise')
def test_to_datetime_other_datetime64_units(self):
# 5/25/2012
scalar = np.int64(1337904000000000).view('M8[us]')
as_obj = scalar.astype('O')
index = DatetimeIndex([scalar])
self.assertEqual(index[0], scalar.astype('O'))
value = Timestamp(scalar)
self.assertEqual(value, as_obj)
def test_to_datetime_list_of_integers(self):
rng = date_range('1/1/2000', periods=20)
rng = DatetimeIndex(rng.values)
ints = list(rng.asi8)
result = DatetimeIndex(ints)
self.assertTrue(rng.equals(result))
def test_to_datetime_dt64s(self):
in_bound_dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
for dt in in_bound_dts:
self.assertEqual(
pd.to_datetime(dt),
Timestamp(dt)
)
oob_dts = [
np.datetime64('1000-01-01'),
np.datetime64('5000-01-02'),
]
for dt in oob_dts:
self.assertRaises(ValueError, pd.to_datetime, dt, errors='raise')
self.assertRaises(ValueError, tslib.Timestamp, dt)
self.assertIs(pd.to_datetime(dt, coerce=True), NaT)
def test_to_datetime_array_of_dt64s(self):
dts = [
np.datetime64('2000-01-01'),
np.datetime64('2000-01-02'),
]
# Assuming all datetimes are in bounds, to_datetime() returns
# an array that is equal to Timestamp() parsing
self.assert_numpy_array_equal(
pd.to_datetime(dts, box=False),
np.array([Timestamp(x).asm8 for x in dts])
)
# A list of datetimes where the last one is out of bounds
dts_with_oob = dts + [np.datetime64('9999-01-01')]
self.assertRaises(
ValueError,
pd.to_datetime,
dts_with_oob,
coerce=False,
errors='raise'
)
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=True),
np.array(
[
Timestamp(dts_with_oob[0]).asm8,
Timestamp(dts_with_oob[1]).asm8,
iNaT,
],
dtype='M8'
)
)
# With coerce=False and errors='ignore', out of bounds datetime64s
# are converted to their .item(), which depending on the version of
# numpy is either a python datetime.datetime or datetime.date
self.assert_numpy_array_equal(
pd.to_datetime(dts_with_oob, box=False, coerce=False),
np.array(
[dt.item() for dt in dts_with_oob],
dtype='O'
)
)
def test_index_to_datetime(self):
idx = Index(['1/1/2000', '1/2/2000', '1/3/2000'])
result = idx.to_datetime()
expected = DatetimeIndex(datetools.to_datetime(idx.values))
self.assertTrue(result.equals(expected))
today = datetime.today()
idx = Index([today], dtype=object)
result = idx.to_datetime()
expected = DatetimeIndex([today])
self.assertTrue(result.equals(expected))
def test_to_datetime_freq(self):
xp = bdate_range('2000-1-1', periods=10, tz='UTC')
rs = xp.to_datetime()
self.assertEqual(xp.freq, rs.freq)
self.assertEqual(xp.tzinfo, rs.tzinfo)
def test_range_misspecified(self):
# GH #1095
self.assertRaises(ValueError, date_range, '1/1/2000')
self.assertRaises(ValueError, date_range, end='1/1/2000')
self.assertRaises(ValueError, date_range, periods=10)
self.assertRaises(ValueError, date_range, '1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, end='1/1/2000', freq='H')
self.assertRaises(ValueError, date_range, periods=10, freq='H')
def test_reasonable_keyerror(self):
# GH #1062
index = DatetimeIndex(['1/3/2000'])
try:
index.get_loc('1/1/2000')
except KeyError as e:
self.assertIn('2000', str(e))
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_promote_datetime_date(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq('4H', method='ffill')
expected = ts[5:].asfreq('4H', method='ffill')
assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
self.assert_numpy_array_equal(result, expected)
def test_asfreq_normalize(self):
rng = date_range('1/1/2000 09:30', periods=20)
norm = date_range('1/1/2000', periods=20)
vals = np.random.randn(20)
ts = Series(vals, index=rng)
result = ts.asfreq('D', normalize=True)
norm = date_range('1/1/2000', periods=20)
expected = Series(vals, index=norm)
assert_series_equal(result, expected)
vals = np.random.randn(20, 3)
ts = DataFrame(vals, index=rng)
result = ts.asfreq('D', normalize=True)
expected = DataFrame(vals, index=norm)
assert_frame_equal(result, expected)
def test_date_range_gen_error(self):
rng = date_range('1/1/2000 00:00', '1/1/2000 00:18', freq='5min')
self.assertEqual(len(rng), 4)
def test_first_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.first('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.first('10d')
self.assertEqual(len(result), 10)
result = ts.first('3M')
expected = ts[:'3/31/2000']
assert_series_equal(result, expected)
result = ts.first('21D')
expected = ts[:21]
assert_series_equal(result, expected)
result = ts[:0].first('3M')
assert_series_equal(result, ts[:0])
def test_last_subset(self):
ts = _simple_ts('1/1/2000', '1/1/2010', freq='12h')
result = ts.last('10d')
self.assertEqual(len(result), 20)
ts = _simple_ts('1/1/2000', '1/1/2010')
result = ts.last('10d')
self.assertEqual(len(result), 10)
result = ts.last('21D')
expected = ts['12/12/2009':]
assert_series_equal(result, expected)
result = ts.last('21D')
expected = ts[-21:]
assert_series_equal(result, expected)
result = ts[:0].last('3M')
assert_series_equal(result, ts[:0])
def test_add_offset(self):
rng = date_range('1/1/2000', '2/1/2000')
result = rng + offsets.Hour(2)
expected = date_range('1/1/2000 02:00', '2/1/2000 02:00')
self.assertTrue(result.equals(expected))
def test_format_pre_1900_dates(self):
rng = date_range('1/1/1850', '1/1/1950', freq='A-DEC')
rng.format()
ts = Series(1, index=rng)
repr(ts)
def test_repeat(self):
rng = date_range('1/1/2000', '1/1/2001')
result = rng.repeat(5)
self.assertIsNone(result.freq)
self.assertEqual(len(result), 5 * len(rng))
def test_at_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.ix[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
# expected.index = date_range('1/1/2000', '1/4/2000')
assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.ix['1/4/2000':]
result = chunk.ix[time(9, 30)]
expected = result_df[-1:]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
assert_series_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_at_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
self.assertTrue((rs.index.hour == rng[1].hour).all())
self.assertTrue((rs.index.minute == rng[1].minute).all())
self.assertTrue((rs.index.second == rng[1].second).all())
result = ts.at_time('9:30')
expected = ts.at_time(time(9, 30))
assert_frame_equal(result, expected)
result = ts.ix[time(9, 30)]
expected = ts.ix[(rng.hour == 9) & (rng.minute == 30)]
assert_frame_equal(result, expected)
# midnight, everything
rng = date_range('1/1/2000', '1/31/2000')
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range('1/1/2012', freq='23Min', periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time('16:00')
self.assertEqual(len(rs), 0)
def test_between_time(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_series_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = Series(np.random.randn(len(rng)), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_between_time_frame(self):
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue(t >= stime)
else:
self.assertTrue(t > stime)
if inc_end:
self.assertTrue(t <= etime)
else:
self.assertTrue(t < etime)
result = ts.between_time('00:00', '01:00')
expected = ts.between_time(stime, etime)
assert_frame_equal(result, expected)
# across midnight
rng = date_range('1/1/2000', '1/5/2000', freq='5min')
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
close_open = product([True, False], [True, False])
for inc_start, inc_end in close_open:
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
self.assertEqual(len(filtered), exp_len)
for rs in filtered.index:
t = rs.time()
if inc_start:
self.assertTrue((t >= stime) or (t <= etime))
else:
self.assertTrue((t > stime) or (t <= etime))
if inc_end:
self.assertTrue((t <= etime) or (t >= stime))
else:
self.assertTrue((t < etime) or (t >= stime))
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
self.assertEqual(rng.freq, rng2.freq)
def test_normalize(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D')
self.assertTrue(result.equals(expected))
rng_ns = pd.DatetimeIndex(np.array([1380585623454345752, 1380585612343234312]).astype("datetime64[ns]"))
rng_ns_normalized = rng_ns.normalize()
expected = pd.DatetimeIndex(np.array([1380585600000000000, 1380585600000000000]).astype("datetime64[ns]"))
self.assertTrue(rng_ns_normalized.equals(expected))
self.assertTrue(result.is_normalized)
self.assertFalse(rng.is_normalized)
def test_to_period(self):
from pandas.tseries.period import period_range
ts = _simple_ts('1/1/2000', '1/1/2001')
pts = ts.to_period()
exp = ts.copy()
exp.index = period_range('1/1/2000', '1/1/2001')
assert_series_equal(pts, exp)
pts = ts.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
def create_dt64_based_index(self):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
index = DatetimeIndex(data)
return index
def test_to_period_millisecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='L')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123Z', 'L'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789Z', 'L'))
def test_to_period_microsecond(self):
index = self.create_dt64_based_index()
period = index.to_period(freq='U')
self.assertEqual(2, len(period))
self.assertEqual(period[0], Period('2007-01-01 10:11:12.123456Z', 'U'))
self.assertEqual(period[1], Period('2007-01-01 10:11:13.789123Z', 'U'))
def test_to_period_tz(self):
_skip_if_no_pytz()
from dateutil.tz import tzlocal
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
self.assertEqual(result, expected)
self.assertTrue(ts.to_period().equals(xp))
def test_frame_to_period(self):
K = 5
from pandas.tseries.period import period_range
dr = date_range('1/1/2000', '1/1/2001')
pr = period_range('1/1/2000', '1/1/2001')
df = DataFrame(randn(len(dr), K), index=dr)
df['mix'] = 'a'
pts = df.to_period()
exp = df.copy()
exp.index = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M')
self.assertTrue(pts.index.equals(exp.index.asfreq('M')))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
assert_frame_equal(pts, exp)
pts = df.to_period('M', axis=1)
self.assertTrue(pts.columns.equals(exp.columns.asfreq('M')))
self.assertRaises(ValueError, df.to_period, axis=2)
def test_timestamp_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
fields = ['dayofweek', 'dayofyear', 'week', 'weekofyear', 'quarter', 'is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end']
for f in fields:
expected = getattr(idx, f)[-1]
result = getattr(Timestamp(idx[-1]), f)
self.assertEqual(result, expected)
self.assertEqual(idx.freq, Timestamp(idx[-1], idx.freq).freq)
self.assertEqual(idx.freqstr, Timestamp(idx[-1], idx.freq).freqstr)
def test_woy_boundary(self):
# make sure weeks at year boundaries are correct
d = datetime(2013,12,31)
result = Timestamp(d).week
expected = 1 # ISO standard
self.assertEqual(result, expected)
d = datetime(2008,12,28)
result = Timestamp(d).week
expected = 52 # ISO standard
self.assertEqual(result, expected)
d = datetime(2009,12,31)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,1)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
d = datetime(2010,1,3)
result = Timestamp(d).week
expected = 53 # ISO standard
self.assertEqual(result, expected)
result = np.array([Timestamp(datetime(*args)).week for args in
[(2000,1,1),(2000,1,2),(2005,1,1),(2005,1,2)]])
self.assertTrue((result == [52, 52, 53, 53]).all())
def test_timestamp_date_out_of_range(self):
self.assertRaises(ValueError, Timestamp, '1676-01-01')
self.assertRaises(ValueError, Timestamp, '2263-01-01')
# 1475
self.assertRaises(ValueError, DatetimeIndex, ['1400-01-01'])
self.assertRaises(ValueError, DatetimeIndex, [datetime(1400, 1, 1)])
def test_timestamp_repr(self):
# pre-1900
stamp = Timestamp('1850-01-01', tz='US/Eastern')
repr(stamp)
iso8601 = '1850-01-01 01:23:45.012345'
stamp = Timestamp(iso8601, tz='US/Eastern')
result = repr(stamp)
self.assertIn(iso8601, result)
def test_timestamp_from_ordinal(self):
# GH 3042
dt = datetime(2011, 4, 16, 0, 0)
ts = Timestamp.fromordinal(dt.toordinal())
self.assertEqual(ts.to_pydatetime(), dt)
# with a tzinfo
stamp = Timestamp('2011-4-16', tz='US/Eastern')
dt_tz = stamp.to_pydatetime()
ts = Timestamp.fromordinal(dt_tz.toordinal(),tz='US/Eastern')
self.assertEqual(ts.to_pydatetime(), dt_tz)
def test_datetimeindex_integers_shift(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
self.assertTrue(result.equals(expected))
result = rng - 5
expected = rng.shift(-5)
self.assertTrue(result.equals(expected))
def test_astype_object(self):
# NumPy 1.6.1 weak ns support
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
self.assert_numpy_array_equal(casted, exp_values)
def test_catch_infinite_loop(self):
offset = datetools.DateOffset(minute=5)
# blow up, don't loop forever
self.assertRaises(Exception, date_range, datetime(2011, 11, 11),
datetime(2011, 11, 12), freq=offset)
def test_append_concat(self):
rng = date_range('5/8/2012 1:45', periods=10, freq='5T')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
result = ts.append(ts)
result_df = df.append(df)
ex_index = DatetimeIndex(np.tile(rng.values, 2))
self.assertTrue(result.index.equals(ex_index))
self.assertTrue(result_df.index.equals(ex_index))
appended = rng.append(rng)
self.assertTrue(appended.equals(ex_index))
appended = rng.append([rng, rng])
ex_index = DatetimeIndex(np.tile(rng.values, 3))
self.assertTrue(appended.equals(ex_index))
# different index names
rng1 = rng.copy()
rng2 = rng.copy()
rng1.name = 'foo'
rng2.name = 'bar'
self.assertEqual(rng1.append(rng1).name, 'foo')
self.assertIsNone(rng1.append(rng2).name)
def test_append_concat_tz(self):
#GH 2938
_skip_if_no_pytz()
rng = date_range('5/8/2012 1:45', periods=10, freq='5T',
tz='US/Eastern')
rng2 = date_range('5/8/2012 2:35', periods=10, freq='5T',
tz='US/Eastern')
rng3 = date_range('5/8/2012 1:45', periods=20, freq='5T',
tz='US/Eastern')
ts = Series(np.random.randn(len(rng)), rng)
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
ts2 = Series(np.random.randn(len(rng2)), rng2)
df2 = DataFrame(np.random.randn(len(rng2), 4), index=rng2)
result = ts.append(ts2)
result_df = df.append(df2)
self.assertTrue(result.index.equals(rng3))
self.assertTrue(result_df.index.equals(rng3))
appended = rng.append(rng2)
self.assertTrue(appended.equals(rng3))
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
self.assertEqual(x[0].dtype, np.dtype('M8[ns]'))
def test_groupby_count_dateparseerror(self):
dr = date_range(start='1/1/2012', freq='5min', periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, lrange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[lrange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
assert_series_equal(result, expected)
def test_datetimeindex_repr_short(self):
dr = date_range(start='1/1/2012', periods=1)
repr(dr)
dr = date_range(start='1/1/2012', periods=2)
repr(dr)
dr = date_range(start='1/1/2012', periods=3)
repr(dr)
def test_constructor_int64_nocopy(self):
# #1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] == -1).all())
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
self.assertTrue((index.asi8[50:100] != -1).all())
def test_series_interpolate_method_values(self):
# #1646
ts = _simple_ts('1/1/2000', '1/20/2000')
ts[::2] = np.nan
result = ts.interpolate(method='values')
exp = ts.interpolate()
assert_series_equal(result, exp)
def test_frame_datetime64_handling_groupby(self):
# it works!
df = DataFrame([(3, np.datetime64('2012-07-03')),
(3, np.datetime64('2012-07-04'))],
columns=['a', 'date'])
result = df.groupby('a').first()
self.assertEqual(result['date'][3], Timestamp('2012-07-03'))
def test_series_interpolate_intraday(self):
# #1698
index = pd.date_range('1/1/2012', periods=4, freq='12D')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(days=1)).order()
exp = ts.reindex(new_index).interpolate(method='time')
index = pd.date_range('1/1/2012', periods=4, freq='12H')
ts = pd.Series([0, 12, 24, 36], index)
new_index = index.append(index + pd.DateOffset(hours=1)).order()
result = ts.reindex(new_index).interpolate(method='time')
self.assert_numpy_array_equal(result.values, exp.values)
def test_frame_dict_constructor_datetime64_1680(self):
dr = date_range('1/1/2012', periods=10)
s = Series(dr, index=dr)
# it works!
DataFrame({'a': 'foo', 'b': s}, index=dr)
DataFrame({'a': 'foo', 'b': s.values}, index=dr)
def test_frame_datetime64_mixed_index_ctor_1681(self):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
ts = Series(dr)
# it works!
d = DataFrame({'A': 'foo', 'B': ts}, index=dr)
self.assertTrue(d['B'].isnull().all())
def test_frame_timeseries_to_records(self):
index = date_range('1/1/2000', periods=10)
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['a', 'b', 'c'])
result = df.to_records()
result['index'].dtype == 'M8[ns]'
result = df.to_records(index=False)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
self.assertTrue((-result).all())
tst = DataFrame({'date': dates})
result = tst.duplicated()
self.assertTrue((-result).all())
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
self.assertFalse(stamp == datetime.min)
self.assertFalse(stamp == datetime(1600, 1, 1))
self.assertFalse(stamp == datetime(2700, 1, 1))
self.assertNotEqual(stamp, datetime.min)
self.assertNotEqual(stamp, datetime(1600, 1, 1))
self.assertNotEqual(stamp, datetime(2700, 1, 1))
self.assertTrue(stamp > datetime(1600, 1, 1))
self.assertTrue(stamp >= datetime(1600, 1, 1))
self.assertTrue(stamp < datetime(2700, 1, 1))
self.assertTrue(stamp <= datetime(2700, 1, 1))
def test_to_html_timestamp(self):
rng = date_range('2000-01-01', periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
self.assertIn('2000-01-01', result)
def test_to_csv_numpy_16_bug(self):
frame = DataFrame({'a': date_range('1/1/2000', periods=10)})
buf = StringIO()
frame.to_csv(buf)
result = buf.getvalue()
self.assertIn('2000-01-01', result)
def test_series_map_box_timestamps(self):
# #2689, #2627
s = Series(date_range('1/1/2000', periods=10))
def f(x):
return (x.hour, x.day, x.month)
# it works!
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_concat_datetime_datetime64_frame(self):
# #2624
rows = []
rows.append([datetime(2010, 1, 1), 1])
rows.append([datetime(2010, 1, 2), 'hi'])
df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])
ind = date_range(start="2000/1/1", freq="D", periods=10)
df1 = DataFrame({'date': ind, 'test':lrange(10)})
# it works!
pd.concat([df1, df2_obj])
def test_period_resample(self):
# GH3609
s = Series(range(100),index=date_range('20130101', freq='s', periods=100), dtype='float')
s[10:30] = np.nan
expected = Series([34.5, 79.5], index=[Period('2013-01-01 00:00', 'T'), Period('2013-01-01 00:01', 'T')])
result = s.to_period().resample('T', kind='period')
assert_series_equal(result, expected)
result2 = s.resample('T', kind='period')
assert_series_equal(result2, expected)
def test_period_resample_with_local_timezone(self):
# GH5430
_skip_if_no_pytz()
import pytz
local_timezone = pytz.timezone('America/Los_Angeles')
start = datetime(year=2013, month=11, day=1, hour=0, minute=0, tzinfo=pytz.utc)
# 1 day later
end = datetime(year=2013, month=11, day=2, hour=0, minute=0, tzinfo=pytz.utc)
index = pd.date_range(start, end, freq='H')
series = pd.Series(1, index=index)
series = series.tz_convert(local_timezone)
result = series.resample('D', kind='period')
# Create the expected series
expected_index = (pd.period_range(start=start, end=end, freq='D') - 1) # Index is moved back a day with the timezone conversion from UTC to Pacific
expected = pd.Series(1, index=expected_index)
assert_series_equal(result, expected)
def test_pickle(self):
#GH4606
from pandas.compat import cPickle
import pickle
for pick in [pickle, cPickle]:
p = pick.loads(pick.dumps(NaT))
self.assertTrue(p is NaT)
idx = pd.to_datetime(['2013-01-01', NaT, '2014-01-06'])
idx_p = pick.loads(pick.dumps(idx))
self.assertTrue(idx_p[0] == idx[0])
self.assertTrue(idx_p[1] is NaT)
self.assertTrue(idx_p[2] == idx[2])
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestDatetimeIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_hash_error(self):
index = date_range('20010101', periods=10)
with tm.assertRaisesRegexp(TypeError,
"unhashable type: %r" %
type(index).__name__):
hash(index)
def test_stringified_slice_with_tz(self):
#GH2658
import datetime
start=datetime.datetime.now()
idx=DatetimeIndex(start=start,freq="1d",periods=10)
df=DataFrame(lrange(10),index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
tm.assert_isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_numpy_array_equal(result, rng.asi8)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
idx.to_period()
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
self.assertEqual(idx.nanosecond[0], t1.nanosecond)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
self.assertTrue(rng.equals(exp))
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
periods='foo', freq='D')
self.assertRaises(ValueError, DatetimeIndex, start='1/1/2000',
end='1/10/2000')
self.assertRaises(ValueError, DatetimeIndex, '1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
self.assertTrue(result.equals(expected))
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
self.assertTrue(result.equals(expected))
from_ints = DatetimeIndex(expected.asi8)
self.assertTrue(from_ints.equals(expected))
# non-conforming
self.assertRaises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'],
freq='D')
self.assertRaises(ValueError, DatetimeIndex,
start='2011-01-01', freq='b')
self.assertRaises(ValueError, DatetimeIndex,
end='2011-01-01', freq='B')
self.assertRaises(ValueError, DatetimeIndex, periods=10, freq='D')
def test_constructor_name(self):
idx = DatetimeIndex(start='2000-01-01', periods=1, freq='A',
name='TEST')
self.assertEqual(idx.name, 'TEST')
def test_comparisons_coverage(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
self.assertRaises(TypeError, rng.__lt__, rng[3].value)
result = rng == list(rng)
exp = rng == rng
self.assert_numpy_array_equal(result, exp)
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = [f(x) for x in rng]
self.assert_numpy_array_equal(result, exp)
def test_add_union(self):
rng = date_range('1/1/2000', periods=5)
rng2 = date_range('1/6/2000', periods=5)
result = rng + rng2
expected = rng.union(rng2)
self.assertTrue(result.equals(expected))
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
tm.assert_isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
self.assertTrue(idx.equals(list(idx)))
non_datetime = Index(list('abc'))
self.assertFalse(idx.equals(list(non_datetime)))
def test_union_coverage(self):
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
ordered = DatetimeIndex(idx.order(), freq='infer')
result = ordered.union(idx)
self.assertTrue(result.equals(ordered))
result = ordered[:0].union(ordered)
self.assertTrue(result.equals(ordered))
self.assertEqual(result.freq, ordered.freq)
def test_union_bug_1730(self):
rng_a = date_range('1/1/2012', periods=4, freq='3H')
rng_b = date_range('1/1/2012', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = DatetimeIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
self.assertTrue(result.equals(exp))
def test_union_bug_1745(self):
left = DatetimeIndex(['2012-05-11 15:19:49.695000'])
right = DatetimeIndex(['2012-05-29 13:04:21.322000',
'2012-05-11 15:27:24.873000',
'2012-05-11 15:31:05.350000'])
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_union_bug_4564(self):
from pandas import DateOffset
left = date_range("2013-01-01", "2013-02-01")
right = left + DateOffset(minutes=15)
result = left.union(right)
exp = DatetimeIndex(sorted(set(list(left)) | set(list(right))))
self.assertTrue(result.equals(exp))
def test_intersection_bug_1708(self):
from pandas import DateOffset
index_1 = date_range('1/1/2012', periods=4, freq='12H')
index_2 = index_1 + DateOffset(hours=1)
result = index_1 & index_2
self.assertEqual(len(result), 0)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
# result = rng + delta
# expected = rng + timedelta(2)
# self.assertTrue(result.equals(expected))
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
self.assertTrue(result.equals(ex))
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
self.assertEqual(idx.argmin(), 1)
self.assertEqual(idx.argmax(), 0)
def test_order(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.order()
self.assertTrue(ordered.is_monotonic)
ordered = idx.order(ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
ordered, dexer = idx.order(return_indexer=True)
self.assertTrue(ordered.is_monotonic)
self.assert_numpy_array_equal(dexer, [1, 2, 0])
ordered, dexer = idx.order(return_indexer=True, ascending=False)
self.assertTrue(ordered[::-1].is_monotonic)
self.assert_numpy_array_equal(dexer, [0, 2, 1])
def test_insert(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
result = idx.insert(2, datetime(2000, 1, 5))
exp = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-05',
'2000-01-02'])
self.assertTrue(result.equals(exp))
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([datetime(2000, 1, 4), 'inserted', datetime(2000, 1, 1),
datetime(2000, 1, 2)])
self.assertNotIsInstance(result, DatetimeIndex)
tm.assert_index_equal(result, expected)
idx = date_range('1/1/2000', periods=3, freq='M')
result = idx.insert(3, datetime(2000, 4, 30))
self.assertEqual(result.freqstr, 'M')
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = np.array([f(index[0])])
self.assert_numpy_array_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
tm.assert_isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
self.assertTrue((result['B'] == dr).all())
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
self.assertTrue(result.all())
result = index.isin(list(index))
self.assertTrue(result.all())
assert_almost_equal(index.isin([index[2], 5]),
[False, False, True, False])
def test_union(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = Int64Index(np.arange(10, 30, 2))
result = i1.union(i2)
expected = Int64Index(np.arange(0, 30, 2))
self.assert_numpy_array_equal(result, expected)
def test_union_with_DatetimeIndex(self):
i1 = Int64Index(np.arange(0, 20, 2))
i2 = DatetimeIndex(start='2012-01-03 00:00:00', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_time(self):
rng = pd.date_range('1/1/2000', freq='12min', periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
self.assertTrue((result == expected).all())
def test_date(self):
rng = pd.date_range('1/1/2000', freq='12H', periods=10)
result = | pd.Index(rng) | pandas.Index |
# -*- coding: utf-8 -*-
"""Compute statistical description of datasets"""
import multiprocessing
import itertools
from functools import partial
import numpy as np
import pandas as pd
import matplotlib
from pkg_resources import resource_filename
import pandas_profiling.formatters as formatters
import pandas_profiling.base as base
from pandas_profiling.plot import histogram, mini_histogram
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
# stats['kurtosis'] = series.kurt()
# stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name)
def describe_date_1d(series):
"""Compute summary statistics of a date (`TYPE_DATE`) variable (a Series).
Also create histograms (mini an full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
stats = dict()
stats['type'] = base.TYPE_DATE
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# Histograms
stats['histogram'] = histogram(series)
stats['mini_histogram'] = mini_histogram(series)
return pd.Series(stats, name=series.name)
def describe_categorical_1d(series):
"""Compute summary statistics of a categorical (`TYPE_CAT`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Only run if at least 1 non-missing value
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
names = []
result = []
if base.get_vartype(series) == base.TYPE_CAT:
names += ['top', 'freq', 'type']
result += [top, freq, base.TYPE_CAT]
return pd.Series(result, index=names, name=series.name)
def describe_boolean_1d(series):
"""Compute summary statistics of a boolean (`TYPE_BOOL`) variable (a Series).
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
value_counts, distinct_count = base.get_groupby_statistic(series)
top, freq = value_counts.index[0], value_counts.iloc[0]
# The mean of boolean is an interesting information
mean = series.mean()
names = []
result = []
names += ['top', 'freq', 'type', 'mean']
result += [top, freq, base.TYPE_BOOL, mean]
return | pd.Series(result, index=names, name=series.name) | pandas.Series |
#***************************************************************
# climo_4.ncl
#
# Concepts illustrated:
# - Drawing a latitude/time contour plot
# - Calculating a zonally averaged annual cycle
# - Setting contour colors using RGB triplets
# - Explicitly setting tickmarks and labels on the bottom X axis
# - Explicitly setting contour levels
# - Transposing an array
#
import numpy as np
import xarray as xr
import matplotlib as mpl
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
#****************************************************
# open file and read in monthly data
#****************************************************
# NOTE:
# This netCDF file is not CF-compliant. The time dimension is just integers
# of form YYMM. Once we check the file to discover this, we can take appropriate
# corrective measures. Since we want to make climatologies, we'd like to be able
# to use groupby with the time acccessor, so we need to get time into regular
# datetime objects. Here is one way to do it.
ds = xr.open_dataset("/Users/brianpm/Documents/www.ncl.ucar.edu/Applications/Data/cdf/xieArkin-T42.nc")
# correct time:
import pandas as pd
otime = ds['time'].astype(int)
times = []
for t in otime:
str_time = str(t.item())
yy = str_time[0:2]
mm = str_time[2:]
yyint = int(yy)
if (1900 + yyint) <= 2000:
yyyy = 1900 + yyint
else:
yyyy = 2000 + yyint
date = f'{yyyy}-{mm}-15'
# print(f"YEAR: {yy} MONTH: {mm} ==> DATE: {date}")
times.append(date)
time = | pd.to_datetime(times) | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# ## 17 - AgriPV - Jack Solar Site Modeling
# Modeling Jack Solar AgriPV site in Longmonth CO, for crop season May September. The site has two configurations:
#
#
# <b> Configuration A: </b>
# * Under 6 ft panels : 1.8288m
# * Hub height: 6 ft : 1.8288m
#
#
# Configuration B:
# * 8 ft panels : 2.4384m
# * Hub height 8 ft : 2.4384m
#
# Other general parameters:
# * Module Size: 3ft x 6ft (portrait mode)
# * Row-to-row spacing: 17 ft --> 5.1816
# * Torquetube: square, diam 15 cm, zgap = 0
# * Albedo = green grass
#
#
# ### Steps in this Journal:
# <ol>
# <li> <a href='#step1'> Load Bifacial Radiance and other essential packages</a> </li>
# <li> <a href='#step2'> Define all the system variables </a> </li>
# <li> <a href='#step3'> Build Scene for a pretty Image </a> </li>
# </ol>
#
# #### More details
# There are three methods to perform the following analysis:
# <ul><li>A. Hourly with Fixed tilt, getTrackerAngle to update tilt of tracker </li>
# <li>B. Hourly with gendaylit1axis using the tracking dictionary </li>
# <li>C. Cumulatively with gencumsky1axis </li>
# </ul>
#
#
# The analysis itself is performed on the HPC with method A, and the results are compared to GHI (equations below). The code below shows how to build the geometry and view it for accuracy, how to evaluate monthly GHI, and how to model the system with `gencumsky1axis`, which is better suited for non-HPC environments; a hedged sketch of that workflow is included after Section 3.
#
#
#
# 
#
# <a id='step1'></a>
# ## 1. Load Bifacial Radiance and other essential packages
# In[1]:
import bifacial_radiance
import numpy as np
import os # this operative system to do the relative-path testfolder for this example.
import pprint # We will be pretty-printing the trackerdictionary throughout to show its structure.
from pathlib import Path
import pandas as pd
# <a id='step2'></a>
# ## 2. Define all the system variables
# In[2]:
testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'Tutorial_17')
if not os.path.exists(testfolder):
os.makedirs(testfolder)
timestamp = 4020 # Noon, June 17th.
simulationName = 'tutorial_17'  # Optionally adding a simulation name when defining RadianceObj
#Location
lat = 40.1217 # Given for the project site at Colorado
lon = -105.1310 # Given for the project site at Colorado
# MakeModule Parameters
moduletype='test-module'
numpanels = 1  # This site has 1 module in the Y-direction
x = 1
y = 2
#xgap = 0.15 # Leaving 15 centimeters between modules on x direction
#ygap = 0.10 # Leaving 10 centimeters between modules on y direction
zgap = 0 # no gap to torquetube.
sensorsy = 6 # this will give 6 sensors per module in y-direction
sensorsx = 3 # this will give 3 sensors per module in x-direction
torquetube = True
axisofrotationTorqueTube = True
diameter = 0.15 # 15 cm diameter for the torquetube
tubetype = 'square' # Put the right keyword upon reading the document
material = 'black' # Torque tube of this material (0% reflectivity)
# Scene variables
nMods = 20
nRows = 7
hub_height = 1.8 # meters
pitch = 5.1816 # meters # Pitch is the known parameter
albedo = 0.2 #'Grass' # ground albedo
gcr = y/pitch
cumulativesky = False
limit_angle = 60 # tracker rotation limit angle
angledelta = 0.01 # we will be doing hourly simulation, we want the angle to be as close to real tracking as possible.
backtrack = True
# In[3]:
test_folder_fmt = 'Hour_{}'
# <a id='step3'></a>
# # 3. Build Scene for a pretty Image
# In[4]:
idx = 272
test_folderinner = os.path.join(testfolder, test_folder_fmt.format(f'{idx:04}'))
if not os.path.exists(test_folderinner):
os.makedirs(test_folderinner)
rad_obj = bifacial_radiance.RadianceObj(simulationName,path = test_folderinner) # Create a RadianceObj 'object'
rad_obj.setGround(albedo)
epwfile = rad_obj.getEPW(lat,lon)
metdata = rad_obj.readWeatherFile(epwfile, label='center', coerce_year=2021)
solpos = rad_obj.metdata.solpos.iloc[idx]
zen = float(solpos.zenith)
azm = float(solpos.azimuth) - 180
dni = rad_obj.metdata.dni[idx]
dhi = rad_obj.metdata.dhi[idx]
rad_obj.gendaylit(idx)
# rad_obj.gendaylit2manual(dni, dhi, 90 - zen, azm)
#print(rad_obj.metdata.datetime[idx])
tilt = round(rad_obj.getSingleTimestampTrackerAngle(rad_obj.metdata, idx, gcr, limit_angle=65),1)
sceneDict = {'pitch': pitch, 'tilt': tilt, 'azimuth': 90, 'hub_height':hub_height, 'nMods':nMods, 'nRows': nRows}
scene = rad_obj.makeScene(module=moduletype,sceneDict=sceneDict)
octfile = rad_obj.makeOct()
# #### The scene generated can be viewed by navigating on the terminal to the testfolder and typing
#
# > rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 tutorial_17.oct
#
# #### OR Comment the ! line below to run rvu from the Jupyter notebook instead of your terminal.
#
# In[5]:
## Comment the ! line below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will pause until you close the rvu window
#!rvu -vf views\front.vp -e .0265652 -vp 2 -21 2.5 -vd 0 1 0 tutorial_17.oct
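# In[ ]:
# Hedged sketch of method C (gencumsky1axis), referenced in the introduction. It is not
# executed in this journal; it only outlines the usual bifacial_radiance trackerdict
# workflow, and method names/arguments may differ between bifacial_radiance versions.
def run_gencumsky1axis_sketch(rad_obj, metdata, module, sceneDict,
                              limit_angle=60, backtrack=True, gcr=0.386, sensorsy=6):
    trackerdict = rad_obj.set1axis(metdata, limit_angle=limit_angle,
                                   backtrack=backtrack, gcr=gcr, cumulativesky=True)
    trackerdict = rad_obj.genCumSky1axis(trackerdict)
    trackerdict = rad_obj.makeScene1axis(trackerdict, module=module, sceneDict=sceneDict)
    trackerdict = rad_obj.makeOct1axis(trackerdict)
    trackerdict = rad_obj.analysis1axis(trackerdict, sensorsy=sensorsy)
    return trackerdict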
# <a id='step4'></a>
# # GHI Calculations
#
# ### From Weather File
# In[6]:
# BOULDER
# Simple method where I know the index where the month starts and collect the monthly values this way.
# In 8760 TMY, these were the indexes:
starts = [2881, 3626, 4346, 5090, 5835]
ends = [3621, 4341, 5085, 5829, 6550]
starts = [metdata.datetime.index(pd.to_datetime('2021-05-01 6:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-06-01 6:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-07-01 6:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-08-01 6:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-09-01 6:0:0 -7'))]
ends = [metdata.datetime.index(pd.to_datetime('2021-05-31 18:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-06-30 18:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-07-31 18:0:0 -7')),
metdata.datetime.index(pd.to_datetime('2021-08-31 18:0:0 -7')),
metdata.datetime.index( | pd.to_datetime('2021-09-30 18:0:0 -7') | pandas.to_datetime |
import pandas as pd
from textblob import TextBlob
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.util import ngrams
import string
from progress.bar import Bar
# filepath = '../data/filtered_train_data_all.csv'
# # filepath = 'toy_set.csv'
# df = pd.read_csv(filepath, index_col=0)
stopwords = set(stopwords.words('english'))
punctuation = string.punctuation
def sentence_sentiment(s):
testimonial = TextBlob(s)
# The polarity score is a float within the range [-1.0, 1.0].
# The subjectivity is a float within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is very subjective.
return testimonial.sentiment.polarity, testimonial.sentiment.subjectivity
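# Small usage sketch (illustrative only; actual values depend on TextBlob's underlying
# model): polarity lies in [-1.0, 1.0] and subjectivity in [0.0, 1.0], as noted above.
def _sentiment_example():
    polarity, subjectivity = sentence_sentiment("This is a surprisingly helpful answer.")
    return polarity, subjectivity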
def stop_word_removal(s):
tokens = word_tokenize(s)
filtered = [w for w in tokens if w not in stopwords and w not in punctuation and not w.isnumeric()]
return filtered
def sentence_processing(df):
""" This function performs two tasks:
- Sentiment extraction and write the results into a csv file
- Write filtered words into two text files, one for labelled data, one for unlabelled data
"""
pos_df, neg_df = df[df['target']==1], df[df['target']==0]
sentences, targets = df['question_text'].values, df['target'].values
sentiment_dict = {'sentiment':[], 'polarity':[], 'target':[]}
pos_tokens = []
neg_tokens = []
bar = Bar("Collecting sentiment over sentences", max=len(sentences))
for i in range(len(sentences)):
sentence = sentences[i]
target = targets[i]
sentiment, polarity = sentence_sentiment(sentence)
sentiment_dict['sentiment'].append(sentiment)
sentiment_dict['polarity'].append(polarity)
sentiment_dict['target'].append(target)
wordnet_lemmatizer = WordNetLemmatizer()
filtered = stop_word_removal(sentence)
stemmed = [wordnet_lemmatizer.lemmatize(w.lower()) for w in filtered]
if target:
pos_tokens.extend(stemmed)
else:
neg_tokens.extend(stemmed)
bar.next()
bar.finish()
sentiment_df = | pd.DataFrame(sentiment_dict) | pandas.DataFrame |
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import torch
import pandas as pd
import time
import os
def load_dataset(config, logger):
    logger.info('Loading dataset (reuse local copy if present, otherwise download)')
path = os.path.join(config['directory']['main'], config['directory']['root_dir'])
train_data = datasets.MNIST(
root=path,
train=True,
transform=transforms.ToTensor(),
target_transform=None,
download=True
)
test_data = datasets.MNIST(
root=path,
train=False,
transform=transforms.ToTensor(),
target_transform=None,
download=True
)
logger.info('Data Loading Successful')
return train_data, test_data
def create_loader(train_data, test_data, config, logger):
logger.info('Creating data Loader')
train_data_loader = DataLoader(dataset=train_data,
batch_size=config['param']['batch_size'],
shuffle=config['param']['shuffle'])
test_data_loader = DataLoader(dataset=test_data,
batch_size=config['param']['batch_size'])
logger.info('Loaders Created and Returned')
return train_data_loader, test_data_loader
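# Illustrative usage sketch (an assumption about the surrounding project, not original code):
# the config keys below are exactly the ones load_dataset() and create_loader() read, and any
# logger exposing .info() will do.
def example_data_setup(logger):
    config = {
        'directory': {'main': '.', 'root_dir': 'data'},
        'param': {'batch_size': 64, 'shuffle': True},
    }
    train_data, test_data = load_dataset(config, logger)
    return create_loader(train_data, test_data, config, logger)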
def get_unique_filename(filename, typ):
if typ == 'Plot':
unique_filename = time.strftime(f"{filename}._%Y_%m_%d_%H_%M.png")
return unique_filename
elif typ == 'Model':
unique_filename = time.strftime(f"{filename}._%Y_%m_%d_%H_%M.pt")
return unique_filename
else:
return None
def save_plot(loss, acc, name, path, logger):
logger.info('Saving Plots')
unique_name1 = get_unique_filename(name, typ='Plot')
path_to_plot1 = os.path.join(path, unique_name1)
fig = | pd.DataFrame(data={'Loss': loss, 'Accuracy': acc}) | pandas.DataFrame |
"""Tests for Safegraph process functions."""
from datetime import date
import tempfile
import os
import time
import numpy as np
import pandas as pd
from delphi_safegraph.process import (
aggregate,
construct_signals,
get_daily_source_files,
process,
process_window
)
from delphi_safegraph.run import SIGNALS
class TestProcess:
"""Tests for processing Safegraph indicators."""
def test_get_source_files(self):
with tempfile.TemporaryDirectory() as tmpdir:
os.makedirs(os.path.join(tmpdir, "social-distancing/2021/01/26"))
os.makedirs(os.path.join(tmpdir, "social-distancing/2021/01/27"))
# generate fake files
open(os.path.join(tmpdir, "social-distancing/2021/01/26/file.csv.gz"), 'w').close()
open(os.path.join(tmpdir, "social-distancing/2021/01/27/file1.csv.gz"), 'w').close()
# have second file generated as second after
# since last modified time has second level resolution
time.sleep(1)
open(os.path.join(tmpdir, "social-distancing/2021/01/27/file2.csv.gz"), 'w').close()
# only second file on 1/27 should be kept since it's more recent
filepath_dict = get_daily_source_files(
os.path.join(tmpdir, "social-distancing/**/*.csv.gz")
)
assert filepath_dict == {
date(2021, 1, 26): os.path.join(tmpdir, "social-distancing/2021/01/26/file.csv.gz"),
date(2021, 1, 27): os.path.join(tmpdir, "social-distancing/2021/01/27/file2.csv.gz")
}
def test_construct_signals_present(self):
"""Tests that all signals are constructed."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
assert 'completely_home_prop' in set(cbg_df.columns)
assert 'full_time_work_prop' in set(cbg_df.columns)
assert 'part_time_work_prop' in set(cbg_df.columns)
assert 'median_home_dwell_time' in set(cbg_df.columns)
def test_construct_signals_proportions(self):
"""Tests that constructed signals are actual proportions."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
assert np.all(cbg_df['completely_home_prop'].values <= 1)
assert np.all(cbg_df['full_time_work_prop'].values <= 1)
assert np.all(cbg_df['part_time_work_prop'].values <= 1)
def test_aggregate_county(self):
"""Tests that aggregation at the county level creates non-zero-valued
signals."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
df = aggregate(cbg_df, SIGNALS, 'county')
assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
x = df[f'{SIGNALS[0]}_se'].values
assert np.all(x[~np.isnan(x)] >= 0)
assert df.shape == (1472, 17)
def test_aggregate_state(self):
"""Tests that aggregation at the state level creates non-zero-valued
signals."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
df = aggregate(cbg_df, SIGNALS, 'state')
assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
x = df[f'{SIGNALS[0]}_se'].values
assert np.all(x[~np.isnan(x)] >= 0)
assert df.shape == (54, 17)
def test_aggregate_msa(self):
"""Tests that aggregation at the state level creates non-zero-valued signals."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
df = aggregate(cbg_df, SIGNALS, 'msa')
assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
x = df[f'{SIGNALS[0]}_se'].values
assert np.all(x[~np.isnan(x)] >= 0)
assert df.shape == (372, 17)
def test_aggregate_hrr(self):
"""Tests that aggregation at the state level creates non-zero-valued signals."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
df = aggregate(cbg_df, SIGNALS, 'hrr')
assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
x = df[f'{SIGNALS[0]}_se'].values
assert np.all(x[~np.isnan(x)] >= 0)
assert df.shape == (306, 17)
def test_aggregate_nation(self):
"""Tests that aggregation at the state level creates non-zero-valued signals."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
df = aggregate(cbg_df, SIGNALS, 'nation')
assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
x = df[f'{SIGNALS[0]}_se'].values
assert np.all(x[~np.isnan(x)] >= 0)
assert df.shape == (1, 17)
def test_aggregate_hhs(self):
"""Tests that aggregation at the state level creates non-zero-valued signals."""
cbg_df = construct_signals(pd.read_csv('raw_data/sample_raw_data.csv'),
SIGNALS)
df = aggregate(cbg_df, SIGNALS, 'hhs')
assert np.all(df[f'{SIGNALS[0]}_n'].values > 0)
x = df[f'{SIGNALS[0]}_se'].values
assert np.all(x[~np.isnan(x)] >= 0)
assert df.shape == (10, 17)
def test_process_window(self, tmp_path):
"""Tests that processing over a window correctly aggregates signals."""
export_dir = tmp_path / 'export'
export_dir.mkdir()
df1 = pd.DataFrame(data={
'date_range_start': ['2020-02-14T00:00:00-05:00:00']*3,
'origin_census_block_group': [10539707003,
10539707003,
10730144081],
'device_count': [100, 200, 1000],
'completely_home_device_count': [2, 12, 40]
})
df2 = pd.DataFrame(data={
'date_range_start': ['2020-02-14T00:00:00-05:00:00'],
'origin_census_block_group': [10730144081],
'device_count': [2000],
'completely_home_device_count': [480]
})
process_window([df1, df2], ['completely_home_prop'], ['county'],
export_dir)
expected = pd.DataFrame(data={
'geo_id': [1053, 1073],
'val': [0.04, 0.14],
'se': [0.02, 0.10],
'sample_size': [2, 2]
})
actual = pd.read_csv(
export_dir / '20200214_county_completely_home_prop.csv')
| pd.testing.assert_frame_equal(expected, actual) | pandas.testing.assert_frame_equal |
from dash.dependencies import Input, Output, State
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import dash_table
import numpy as np
import plotly.express as px
from apps.app import dash_app
from apps.template import app_layout
import datetime as dt
import requests
from tqdm import tqdm
import mysql.connector
from mysql.connector import errorcode
import sqlalchemy as db
import sys
engine = db.create_engine('mysql+mysqldb://root:[email protected]:3306/Kindred')
dash_app = dash_app
dash_app.layout = app_layout()
app = dash_app.server
@dash_app.callback(
[Output(component_id='get_data_table',component_property='data'),
Output(component_id='get_data_table',component_property='columns'),
Output(component_id='get_success',component_property='children'),
Output(component_id='get_store',component_property='data')],
Input(component_id='get_data',component_property='n_clicks'),
prevent_initial_call=True
)
def get_goal_data(n_clicks):
def prem_year_mapper():
return {
'PREM_20_21': 667,
'PREM_19_20':639,
'PREM_18_19':614,
'PREM_17_18':586,
'PREM_16_17':556,
}
def prem_team_mapper2():
return {
'Manchester City':14,
'Manchester United':3,
'Leicester City': 13,
'Chelsea':8,
'Liverpool':4,
'West Ham United':21,
'Tottenham Hotspur':19,
'Everton':10,
'Arsenal':5,
'Leeds United':12,
'Aston Villa':24,
'Wolverhampton Wanderers':63,
'Southampton':18,
'Burnley':54,
'Newcastle United':16,
'Crystal Palace':35,
'Brighton & Hove Albion':749,
'Fulham':55,
'West Bromwich Albion':64,
'Sheffield United':27
}
def get_data(year,team):
base_url = f'https://www.statbunker.com/competitions/TopGoalScorers?comp_id={year}&club_id={team}'
html = requests.get(base_url).content
df_list = pd.read_html(html)
return df_list
def main():
"""return dataframe of concated team data"""
res = pd.DataFrame()
years = prem_year_mapper()
teams = prem_team_mapper2()
for m,s in years.items():
for k,v in tqdm(teams.items()):
try:
data = get_data(s,v)[0]
data['team'] = [k for x in range(len(data))]
data['year'] = [m for x in range(len(data))]
res = pd.concat([res,data],ignore_index=True)
except ValueError:
pass
return res
if n_clicks > 0:
res = main()
res_full = res.to_json(orient='records')
columns = [{'id':str(k),'name':str(k)} for k in res.columns]
res_red = res.head(30).to_dict(orient='records')
get_success='Yes'
else:
pass
return res_red, columns, get_success, res_full
@dash_app.callback(
[Output(component_id='check_data_table',component_property='data'),
Output(component_id='check_data_table',component_property='columns'),
Output(component_id='check_success',component_property='children'),
Output(component_id='check_store',component_property='data')],
[Input(component_id='check_data',component_property='n_clicks'),
Input(component_id='get_store',component_property='data')],
prevent_initial_call=True
)
def get_check_table(n_clicks,data):
data = pd.read_json(data)
# need to read from database and check
old_data = pd.read_sql_table('Top_Goal_Scorers',engine)
same_cols = [x for x in data.columns if x in old_data.columns]
diff = pd.concat([data[same_cols],old_data[same_cols]]).drop_duplicates(keep=False)
diff_store = diff.to_json(orient='records')
diff_res = diff.to_dict(orient='records')
diff_cols = [{'id':str(k),'name':str(k)} for k in diff.columns]
check_success = 'SUCCESS'
return diff_res, diff_cols, check_success, diff_store
@dash_app.callback(
Output(component_id='data_write',component_property='children'),
[Input(component_id='write_data',component_property='n_clicks'),
Input(component_id='check_store',component_property='data')]
)
def write_to_db(n_clicks,data):
data = | pd.read_json(data) | pandas.read_json |
"""
Convert MIMIC III data to CCDEF (hdf5 based)
"""
import numpy as np
import pandas as pd
#import sqlite3
import h5py
import json
import os
import wfdb
from ccdef._utils import df_to_sarray
def patient_id_from_file(filename):
return int(os.path.basename(filename).split('p')[1].split('-')[0])
def labs_to_df (dset):
# extract values from dataset and convert to dataframe
# load metadata
tests_dict = json.loads(dset.attrs['.test_info'])
info_df = pd.DataFrame.from_dict(tests_dict, orient='columns').T
# create column for merge
info_df['testid'] = info_df.index.astype(int)
#load labs
labs_df = pd.DataFrame(dset[:])
#merge
df = pd.merge(labs_df, info_df, on = 'testid')
return (df)
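# Usage sketch (hypothetical: the '/labs' dataset path is a guess and not taken from the
# file format shown here; the only requirement is an HDF5 dataset carrying the '.test_info'
# attribute that labs_to_df() expects).
def _labs_example(filename, labs_path='/labs'):
    with h5py.File(filename, 'r') as f:
        return labs_to_df(f[labs_path])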
class Admissions ():
def __init__ (self, path):
self.load(path)
def load(self, path):
print('Loading MIMIC admission data from {}'.format(path))
self.data = pd.read_csv(os.path.join(path, 'ADMISSIONS.csv'))
self.data['HADM_ID'] = self.data['HADM_ID'].fillna(0)
self.data['HADM_ID'] = self.data['HADM_ID'].astype({'HADM_ID':int})
self.data.columns = self.data.columns.str.strip().str.lower()
def get_admissions(self, subj_id):
df = self.data[self.data['subject_id']==subj_id]
return df
def find_admission (self, filename):
"""
find admission corresponding to the file based on the subject and start time
"""
subj_id = patient_id_from_file(filename)
print('searching {}'.format(subj_id))
# get additional demographics (gender, DOB) -> demographics class for this?
f = h5py.File(filename, 'r')
sig_start = pd.to_datetime(json.loads(f['/'].attrs['.meta'])['time_origin'])
print('Signal file start {}'.format(sig_start))
admits = self.get_admissions(subj_id)
for idx, row in admits.iterrows():
adm_time = pd.to_datetime(row['admittime']).tz_localize('US/Eastern')
dsc_time = | pd.to_datetime(row['dischtime']) | pandas.to_datetime |
import gensim
import numpy as np
import pandas as pd
import re
import os
import time
import jieba
import cv2
import json
import urllib
import random
import hashlib
from snownlp import sentiment
from snownlp import SnowNLP
import jieba.posseg as pseg
from gensim.models import word2vec
import logging
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import models, transforms
from PIL import Image
from tensorflow.keras.applications import vgg19
from tensorflow.keras.applications import resnet50
from tensorflow.keras.preprocessing import image
from collections import Counter
from scipy.linalg import norm
train_csv_path = r'G:\毕设\数据集\微博\train.csv'
text_csv_path = r'G:\毕设\数据集\微博\text.csv'
user_csv_path = r'G:\毕设\数据集\微博\user.csv'
image_csv_path = r'G:\毕设\数据集\微博\image.csv'
en_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_index.json'
cn_imagenet_class_path = r'G:\毕设\数据集\微博\imagenet_class_cn.json'
image_class_vgg19_score_path = r'G:\毕设\数据集\微博\image_class_vgg19.txt'
image_class_resnet50_score_path = r'G:\毕设\数据集\微博\image_class_resnet50.txt'
train_negative_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/negative.txt'
train_positive_corpus_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/positive.txt'
sentiment_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+'/util/sentiment.marshal'
stopwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/stopwords.txt"
word2vec_txt_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/word2vec_corpus.txt"
word2vec_model_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/text8.model"
possentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/possentiwords.txt"
negsentiwords_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+".")+"/util/negsentiwords.txt"
appid = '20190716000318328'
secretKey = '<KEY>'
url_baidu = 'http://api.fanyi.baidu.com/api/trans/vip/translate'
def train_data_read(train_csv_path):
"""
训练数据的读入
df_text 文本信息列
df_user 用户信息列
df_image 图片信息列
"""
logging.info("正在载入数据中...")
#微博信息
df_text = pd.read_csv(train_csv_path,usecols=['id','text','category','label'])
#用户信息
df_user = pd.read_csv(train_csv_path,usecols=['id','userGender','userFollowCount','userFansCount','userWeiboCount','userLocation','userDescription'])
#微博图片信息
df_image = pd.read_csv(train_csv_path,usecols=['id','piclist'])
logging.info("数据载入完成")
return df_text,df_user,df_image
def text_data_read():
'''
    Read the text feature file.
    :return: text feature dataframe
'''
df_text = pd.read_csv(text_csv_path)
return df_text
def text_insert_cols(df_text,new_features_list):
'''
    Add new feature columns to the text dataframe so their values can be filled in later.
    :param df_text: text dataframe
    :return: df_text: new text dataframe with the extra columns
    '''
    logging.info("Adding new text feature columns...")
    col_name = list(df_text.columns)
    # de-duplicate the new column names against the existing ones before inserting
    col_name = col_name + sorted(set(new_features_list) - set(col_name), key=new_features_list.index)
    df_text = df_text.reindex(columns=col_name, fill_value=0)
    logging.info("New text feature columns added")
return df_text
def text_feature_extraction(df_text):
logging.info("开始文本特征提取...")
# #统计字符串长度
# df_text['text_length'] = df_text['text'].str.len()
# #将情感分数列转为float
# df_text['sentiment_score'] = df_text['sentiment_score'].astype(float)
for j in range(1,65):
df_text['word2vec_'+str(j)] = df_text['word2vec_'+str(j)].astype(float)
# #其余数据统计
i = 0
for index, row in df_text.iterrows():
logging.info("处理进度"+str(i+1)+"/"+str(df_text.shape[0]))
#获得需要处理的文本内容
text_content = row['text']
# #获得是否含有问号以及问号的数量
# if row['num_questmarks'] > 0:
# df_text.at[i, 'contains_questmark'] = 1
# df_text.at[i,'contains_questmark'], df_text.at[i,'num_questmarks'] = text_questmark(text_content)
# #获得是否含有感叹号以及感叹号的数量
# if row['num_exclammarks'] > 0:
# df_text.at[i, 'contains_exclammark'] = 1
# df_text.at[i, 'contains_exclammark'], df_text.at[i, 'num_exclammarks'] = text_exclammark(text_content)
# #获得是否含有hashtag以及hashtag的数量
# if row['num_hashtags'] > 0:
# df_text.at[i, 'contains_hashtag'] = 1
# df_text.at[i, 'contains_hashtag'], df_text.at[i, 'num_hashtags'] = text_hashtag(text_content)
# #获得是否含有url以及url的数量
# if row['num_URLs'] > 0:
# df_text.at[i, 'contains_URL'] = 1
# df_text.at[i, 'contains_URL'], df_text.at[i, 'num_URLs'] = text_url(text_content)
# #获得是否含有@以及@的数量
# if row['num_mentions'] > 0:
# df_text.at[i, 'contains_mention'] = 1
# df_text.at[i, 'contains_mention'], df_text.at[i, 'num_mentions'] = text_mention(text_content)
# #获得文本情感分数
# df_text.at[i, 'sentiment_score'] = text_sentiment_score(text_content)
# #词性标注,统计名词、动词、代词数量并返回
# df_text.at[i, 'num_noun'],df_text.at[i, 'num_verb'],df_text.at[i, 'num_pronoun'] = text_part_of_speech(text_content)
# #计算每条微博正文的词向量均值
df_text.at[i,-64:] = text_compute_word2vec(text_content).tolist()
# #获得每条微博的积极词汇数、消极词汇数
# df_text.at[i, 'num_possentiwords'], df_text.at[i, 'num_negsentiwords'] = text_pos_neg_sentiwords(text_content)
#获取新闻是否含有第一人称、第二人称、第三人称
# df_text.at[i, 'contains_firstorderpron'], df_text.at[i, 'contains_secondorderpron'], df_text.at[i, 'contains_thirdorderpron'] = text_get_fir_sec_thi_orderpron(text_content)
i += 1
logging.info("文本特征提取结束...")
return df_text
def text_get_fir_sec_thi_orderpron(text_content):
"""
    Check whether first / second / third person pronouns appear in the post.
    :param text_content:
    :return: has_first, has_second, has_third (0: absent, 1: present)
    """
    has_first = 0   # first person
    has_second = 0  # second person
    has_third = 0   # third person
if text_content.find('我') != -1:
has_first = 1
elif text_content.find('你') != -1:
has_second = 1
elif text_content.find('他') != -1 or text_content.find('她') != -1 or text_content.find('它') != -1:
has_third = 1
return has_first, has_second, has_third
def text_pos_neg_sentiwords(text_content):
    # tokenized string with stop words removed
    new_text_content = jieba_clear_text(text_content)
    # turn the token string into a list
    list_new_text_content = new_text_content.split(' ')
    # count positive and negative sentiment words
num_pos = 0
num_neg = 0
for word in list_new_text_content:
if word in possentiwords:
num_pos += 1
elif word in negsentiwords:
num_neg += 1
return num_pos,num_neg
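# Hypothetical loading sketch (the real module presumably builds these sets elsewhere): one
# sentiment word per line, read from the possentiwords_path / negsentiwords_path constants
# above into the module-level sets used by text_pos_neg_sentiwords().
def _load_sentiword_set(path):
    with open(path, 'r', encoding='utf-8') as f:
        return set(line.strip() for line in f if line.strip())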
def text_part_of_speech(text_content):
"""
    POS-tag the Chinese characters in the text and return the counts.
    :param text_content: text content
    :return: n number of nouns, v number of verbs, r number of pronouns
    """
    # keep only the Chinese characters
    if pd.isna(text_content):
        return 0,0,0
    words = pseg.cut("".join(re.findall(u"[\u4e00-\u9fa5]",text_content)))
    n = 0 # number of nouns
    r = 0 # number of pronouns
    v = 0 # number of verbs
for w in words:
if (w.flag.startswith('n')):
n += 1
elif (w.flag.startswith('v')):
v += 1
elif (w.flag.startswith('r')):
r += 1
return n,v,r
def text_questmark(text_content):
"""
    Handle question marks in the text.
    :param text_content: text to process
    :return: whether it contains a question mark (1: yes, 0: no), number of question marks
"""
en_questmark_nums = text_content.count("?")
cn_questmark_nums = text_content.count("?")
if(en_questmark_nums + cn_questmark_nums > 0):
return 1,en_questmark_nums + cn_questmark_nums
else:
return 0,0
def text_train_sentiment():
    # train on the Weibo corpus
    sentiment.train(train_negative_corpus_path,train_positive_corpus_path)
    # save the model; also update snownlp->sentiment->__init__.py->data_path accordingly
sentiment.save(sentiment_model_path)
def text_sentiment_score(text_content):
"""
    Get the sentiment score of the text.
    0 <------------------> 1
    negative          positive
    :param text_content: text to process
    :return: sentiment_score.sentiments sentiment score
"""
if | pd.isna(text_content) | pandas.isna |
import re
import os
import pandas as pd
import numpy as np
import pickle as pkl
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.utils import resample
from sklearn.utils import shuffle
from variables import train_data_path, test_data_path, preprocessed_eclothing_data, eclothing_data
from sklearn.metrics.pairwise import cosine_similarity
import matplotlib.pyplot as plt
from collections import Counter
def get_sentiment_data():
global train_data_path, test_data_path
if not os.path.exists(train_data_path) or not os.path.exists(test_data_path) or not os.path.exists(preprocessed_eclothing_data):
print("Upsampling data !!!")
df = pd.read_csv(eclothing_data)
data = df.copy()[['ID','Clothing ID','Review Text','Recommended IND']]
data['PreProcessed Text'] = data.apply(preprocessed_text_column, axis=1)
data.to_csv(preprocessed_eclothing_data, encoding='utf-8', index=False)
upsample_data(data)
train_data = | pd.read_csv(train_data_path) | pandas.read_csv |
import datetime
import re
import empyrical as em
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pyfolio as pf
import pymongo
import QUANTAXIS as QA
from qaenv import mongo_ip
#mongo_ip = '127.0.0.1'
def mergex(dict1, dict2):
dict1.update(dict2)
return dict1
def promise_list(value):
return value if isinstance(value, list) else [value]
class QA_QIFIMANAGER():
"""
    Manage the trading history of a single qifi account.
    --> mirrors the historical backtest mode of QAAccount
    --> TODO: add support for the QARisk / QAPerformance parts
    --> TODO: add support for the QAWEBSERVER part
    --> TODO: add support for the web front-end part
"""
def __init__(self, account_cookie, mongo_ip=mongo_ip, database='quantaxis', collection='history'):
self.database = self.change_database(database, collection)
self.database.create_index([("account_cookie", pymongo.ASCENDING),
("trading_day", pymongo.ASCENDING)], unique=True)
self.account_cookie = account_cookie
self.assets = self.get_historyassets()
self.trade = self.get_historytrade()
self.assets_start = self.assets.index[0]
self.assets_end = self.assets.index[-1]
self.benchmark_code = '000300'
self.benchmark_assets = self.get_benchmark_assets(
self.benchmark_code, self.assets_start, self.assets_end)
    def __repr__(self):
return f"{self.account_cookie}- start: {self.assets_start} - end: {self.assets_end}- benchmark: {self.benchmark_code}"
def get_benchmark_assets(self, code, start, end):
return QA.QA_fetch_index_day_adv(code, start, end).data.reset_index(1).close
def set_benchmark_assets(self, assets):
self.benchmark_assets = assets.loc[self.assets_start, self.assets_end]
def change_database(self, database_name, collection_name):
return pymongo.MongoClient(mongo_ip).get_database(
database_name).get_collection(collection_name)
def get_qifislice(self, date):
return self.database.find_one({'account_cookie': self.account_cookie, 'trading_day': date})
@property
def returns(self):
returns = self.assets.pct_change()
returns.index = returns.index
return returns
@property
def benchmark_returns(self):
returns = self.benchmark_assets.pct_change()
try:
returns.index = returns.index
except:
pass
return returns
@property
def month_assets(self):
return self.assets.resample('M').last()
@property
def month_assets_profit(self):
res = pd.concat([pd.Series(self.assets.iloc[0]),
self.month_assets]).diff().dropna()
res.index = res.index.map(str)
return res
def get_historyassets(self, start='1990-01-01', end=str(datetime.date.today())) -> pd.Series:
b = [(item['accounts']['balance'], item['trading_day']) for item in self.database.find(
{'account_cookie': self.account_cookie}, {'_id': 0, 'accounts': 1, 'trading_day': 1})]
res = pd.DataFrame(b, columns=['balance', 'trading_day']).dropna()
print(res.trading_day[0])
res = res[res.trading_day.apply(lambda x: x!='')]
res = res.assign(datetime=pd.to_datetime(
res['trading_day']), balance=res.balance.apply(round, 2)).dropna().set_index('datetime').sort_index()
res = res.balance
res.name = self.account_cookie
print(res)
return res.bfill().ffill().sort_index().loc[start:end]
def get_historytrade(self,):
b = [item['trades'].values() for item in self.database.find(
{'account_cookie': self.account_cookie}, {'_id': 0, 'trades': 1, 'trading_day': 1})]
i = []
for ix in b:
i.extend(list(ix))
res = pd.DataFrame(i)
# print(res)
res = res.assign(account_cookie=res['user_id'], code=res['instrument_id'], tradetime=res['trade_date_time'].apply(
lambda x: datetime.datetime.fromtimestamp(x/1000000000))).set_index(['tradetime', 'code']).sort_index()
return res.drop_duplicates().sort_index()
def get_sharpe(self):
n = self.get_historyassets()
a = ((n.iloc[-1]/n.iloc[0] - 1)/len(n)*365) / \
abs((n.pct_change()*100).std())
return 0 if np.isnan(a) else a
def show_perf_stats(self, live_start_date=None):
pf.show_perf_stats(self.returns, self.benchmark_returns,
live_start_date=live_start_date)
def create_returns_tear_sheet(self, live_start_date=None):
pf.create_returns_tear_sheet(
self.returns, benchmark_rets=self.benchmark_returns, live_start_date=live_start_date)
plt.show()
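# Minimal usage sketch (assumes a populated quantaxis.history collection and a valid
# account_cookie; only methods defined on QA_QIFIMANAGER above are used).
def _qifimanager_example(account_cookie):
    manager = QA_QIFIMANAGER(account_cookie)
    print(manager.get_sharpe())
    print(manager.month_assets_profit)
    manager.show_perf_stats()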
class QA_QIFISMANAGER():
"""
    Manage the trading history of multiple qifi accounts.
    --> mirrors the historical backtest mode of QAAccount
    --> TODO: add support for the QARisk / QAPerformance parts
    --> TODO: add support for the QAWEBSERVER part
    --> TODO: add support for the web front-end part
"""
def __init__(self, mongo_ip=mongo_ip, account_cookie='', model='BACKTEST'):
if model =='REALTIME':
self.database = pymongo.MongoClient(mongo_ip).QAREALTIME.account
else:
self.database = pymongo.MongoClient(mongo_ip).quantaxis.history
self.database.create_index([("account_cookie", pymongo.ASCENDING),
("trading_day", pymongo.ASCENDING)], unique=True)
def promise_list(self, value) -> list:
return value if isinstance(value, list) else [value]
def get_allportfolio(self) -> list:
print(self.database)
return list(set([i['portfolio'] for i in self.database.find({}, {'portfolio': 1, '_id': 0})]))
def get_portfolio_account(self, portfolio) -> list:
return list(set([i['account_cookie'] for i in self.database.find({'portfolio': portfolio}, {'account_cookie': 1, '_id': 0})]))
def query_re(self, text) -> list:
return list(set([i['account_cookie'] for i in self.database.find({'account_cookie': {"$regex": text}}, {'account_cookie': 1, '_id': 0})]))
def get_portfolio_panel(self, portfolio) -> pd.DataFrame:
r = self.get_portfolio_account(portfolio)
rp = [self.database.find_one({'account_cookie': i}, {
"accounts": 1, 'trading_day': 1, '_id': 0}) for i in r]
return pd.DataFrame([mergex(i['accounts'], {'trading_day': i['trading_day']}) for i in rp])
def get_allaccountname(self) -> list:
return list(set([i['account_cookie'] for i in self.database.find({}, {'account_cookie': 1, '_id': 0})]))
def get_historyassets(self, account_cookie, start='1990-01-01', end=str(datetime.date.today())) -> pd.Series:
b = [(item['accounts']['balance'], item['trading_day']) for item in self.database.find(
{'account_cookie': account_cookie}, {'_id': 0, 'accounts': 1, 'trading_day': 1})]
res = pd.DataFrame(b, columns=['balance', 'trading_day'])
res = res.assign(datetime=pd.to_datetime(
res['trading_day']), balance=res.balance.apply(round, 2)).set_index('datetime').sort_index()
res = res.balance
res.name = account_cookie
return res.bfill().ffill().loc[start:end]
def get_sharpe(self, n):
a = ((n.iloc[-1]/n.iloc[0] - 1)/len(n)*365) / \
abs((n.pct_change()*100).std())
return 0 if np.isnan(a) else a
def get_portfolio_assets(self, portfolio, start='1990-01-01', end=str(datetime.date.today())) -> pd.Series:
"""
KTKS_t05_au2106_15min KTKS_t04b_au2106_5min KTKS_t12_au2106_30min KTKS_t04_au2106_15min ... KTKS_t01_au2106_15min KTKS_t03_au2106_15min KTKS_t01b2_au2106_5min KTKS_t15_au2106_5min
datetime ...
2020-01-02 100000 100000 100000 100000 ... 100000 100000 100000 99340
2020-01-03 100000 100723 100000 100000 ... 101080 101099 102880 104310
2020-01-06 100000 108153 100000 100000 ... 108510 108529 110310 108830
2020-01-07 100000 104813 100000 100000 ... 104930 105189 110030 109790
"""
return pd.concat([self.get_historyassets(acc, start, end) for acc in portfolio], axis=1)
def get_historytrade(self, account_cookie):
b = [item['trades'].values() for item in self.database.find(
{'account_cookie': account_cookie}, {'_id': 0, 'trades': 1, 'trading_day': 1})]
i = []
for ix in b:
i.extend(list(ix))
res = pd.DataFrame(i)
# print(res)
res = res.assign(account_cookie=res['user_id'], code=res['instrument_id'], tradetime=res['trade_date_time'].apply(
lambda x: datetime.datetime.fromtimestamp(x/1000000000))).set_index(['tradetime', 'code']).sort_index()
return res.drop_duplicates().sort_index()
def get_historyorders(self, account_cookie):
b = [item['orders'].values() for item in self.database.find(
{'account_cookie': account_cookie}, {'_id': 0, 'orders': 1, 'trading_day': 1})]
i = []
for ix in b:
i.extend(list(ix))
res = pd.DataFrame(i)
res = res.assign(account_cookie=res['user_id'], code=res['instrument_id'], ordertime=res['insert_date_time'].apply(
lambda x: datetime.datetime.fromtimestamp(x/1000000000))).set_index(['ordertime', 'code']).sort_index()
return res.drop_duplicates().sort_index()
def rankstrategy(self, code):
res = pd.concat([self.get_historyassets(i) for i in code], axis=1)
res = res.fillna(method='bfill').ffill()
rp = (res.apply(self.get_sharpe) + res.tail(50).apply(self.get_sharpe) +
res.tail(10).apply(self.get_sharpe)).sort_values()
return rp[rp > 0.5].sort_values().tail(2)
def get_historypos(self, account_cookie):
b = [mergex(list(item['positions'].values())[0], {'trading_day': item['trading_day']}) for item in self.database.find(
{'account_cookie': account_cookie}, {'_id': 0, 'positions': 1, 'trading_day': 1})]
res = pd.DataFrame(b)
res.name = account_cookie
return res.set_index('trading_day')
def get_lastpos(self, account_cookie):
b = [mergex(list(item['positions'].values())[0], {'trading_day': item['trading_day']}) for item in self.database.find(
{'account_cookie': account_cookie}, {'_id': 0, 'positions': 1, 'trading_day': 1})]
res = pd.DataFrame(b)
res.name = account_cookie
return res.iloc[-1]
def get_historymargin(self, account_cookie):
b = [(item['accounts']['margin'], item['trading_day']) for item in self.database.find(
{'account_cookie': account_cookie}, {'_id': 0, 'accounts': 1, 'trading_day': 1})]
res = pd.DataFrame(b, columns=['balance', 'trading_day'])
res = res.assign(datetime=pd.to_datetime(
res['trading_day'])).set_index('datetime').sort_index()
res = res.balance
res.name = account_cookie
return res
def get_holding_panel(self, account_cookie, trading_day):
# print(self.database)
# print(account_cookie)
# print(trading_day)
b = list(self.database.find_one(
{'account_cookie': account_cookie, 'trading_day': trading_day}, {'_id': 0, 'positions': 1})['positions'].values())
res = pd.DataFrame(b)
res.name = account_cookie
return res.assign(code=res.instrument_id).set_index('code')
def get_holding_block(self, account_cookie, trading_day):
b = list(self.database.find_one(
{'account_cookie': account_cookie}, {'_id': 0, 'positions': 1})['positions'].values())
res = | pd.DataFrame(b) | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import pandas as pd
import numpy as np
import click
from datetime import datetime
logger = logging.getLogger(__name__)
_COLS_TO_CONVERT = [
'market_data_current_price_usd',
'market_data_circulating_supply',
'market_data_ath_usd',
'market_data_high_24h_usd',
'market_data_low_24h_usd',
'KW1',
'KW2',
'KW3',
'KW4',
'KW5',
'KW6',
'KW7',
'KW8',
'KW9',
'KW10',
'KW11',
'KW12',
'KW13',
'KW14',
'KW15',
'KW16',
'KW17',
'KW18',
'KW19',
'KW20',
'KW21',
'KW22',
'KW23',
'KW24',
'KW25',
'KW26',
'KW27',
'KW28',
'KW29',
'KW30',
'KW31',
'KW32',
'KW33',
'KW34',
'KW35',
'KW36',
'KW37',
'KW38',
'KW39',
'ico_data_total_raised'
]
def read_in_data(path_bitcoin_df='data/raw/1_training_data_sets/1_bitcoin_price_data_set.csv',
path_training_df='data/raw/1_training_data_sets/1_training_data.csv',
path_test_df='data/raw/2_classification_data.csv'):
"""Function to read in data
Parameters
----------
path_bitcoin_df : str, optional
Path to bitcoin set, by default 'data/raw/1_training_data_sets/1_bitcoin_price_data_set.csv'
path_training_df : str, optional
Path to training set, by default 'data/raw/1_training_data_sets/1_training_data.csv'
path_test_df : str, optional
Path to training set, by default 'data/raw/2_classification_data.csv'
Returns
-------
tuple (df, df, df)
df_bitcoin, df, df_test
"""
df_bitcoin = pd.read_csv(
path_bitcoin_df, encoding="ISO-8859-1", delimiter=';')
df = pd.read_csv(path_training_df, encoding="ISO-8859-1")
df_test = | pd.read_csv(path_test_df, encoding="ISO-8859-1") | pandas.read_csv |
"""Module to provide generic utilities for other accelerometer modules."""
from collections import OrderedDict
import datetime
import json
import math
import os
import pandas as pd
import re
DAYS = ['mon', 'tue', 'wed', 'thur', 'fri', 'sat', 'sun']
TIME_SERIES_COL = 'time'
def formatNum(num, decimalPlaces):
"""return str of number formatted to number of decimalPlaces
When writing out 10,000's of files, it is useful to format the output to n
decimal places as a space saving measure.
:param float num: Float number to be formatted.
:param int decimalPlaces: Number of decimal places for output format
:return: Number formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.formatNum(2.567, 2)
2.57
"""
fmt = '%.' + str(decimalPlaces) + 'f'
return float(fmt % num)
def meanSDstr(mean, std, numDecimalPlaces):
"""return str of mean and stdev numbers formatted to number of decimalPlaces
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int decimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
>>> accUtils.meanSDstr(2.567, 0.089, 2)
2.57 (0.09)
"""
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(std, numDecimalPlaces))
outStr += ')'
return outStr
def meanCIstr(mean, std, n, numDecimalPlaces):
"""return str of mean and 95% confidence interval numbers formatted
:param float mean: Mean number to be formatted.
:param float std: Standard deviation number to be formatted.
:param int n: Number of observations
:param int decimalPlaces: Number of decimal places for output format
:return: String formatted to number of decimalPlaces
:rtype: str
:Example:
>>> import accUtils
    >>> accUtils.meanCIstr(2.567, 0.089, 5, 2)
    2.57 (2.49 - 2.65)
"""
stdErr = std / math.sqrt(n)
lowerCI = mean - 1.96 * stdErr
upperCI = mean + 1.96 * stdErr
outStr = str(formatNum(mean, numDecimalPlaces))
outStr += ' ('
outStr += str(formatNum(lowerCI, numDecimalPlaces))
outStr += ' - '
outStr += str(formatNum(upperCI, numDecimalPlaces))
outStr += ')'
return outStr
def toScreen(msg):
"""Print msg str prepended with current time
:param str mgs: Message to be printed to screen
:return: Print msg str prepended with current time
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.toScreen("hello")
2018-11-28 10:53:18 hello
"""
timeFormat = '%Y-%m-%d %H:%M:%S'
print(f"\n{datetime.datetime.now().strftime(timeFormat)}\t{msg}")
def writeCmds(accDir, outDir, cmdsFile='processCmds.txt', accExt="cwa", cmdOptions="", filesCSV=None):
"""Generate a text file listing processing commands for files found under accDir/
:param str accDir: Directory with accelerometer files to process
:param str outDir: Output directory to be created containing the processing results
:param str cmdsFile: Output .txt file listing all processing commands
:param str accExt: Acc file type e.g. cwa, CWA, bin, BIN, gt3x...
:param str cmdOptions: String of processing options e.g. "--epochPeriod 10"
Type 'python3 accProccess.py -h' for full list of options
:return: New file written to <cmdsFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.writeProcessingCommands("myAccDir/", "myResults/", "myProcessCmds.txt")
<cmd options written to "myProcessCmds.txt">
"""
# Use filesCSV if provided, else retrieve all accel files under accDir/
if filesCSV in os.listdir(accDir):
filesCSV = pd.read_csv(os.path.join(accDir, filesCSV), index_col="fileName")
filesCSV.index = accDir.rstrip("/") + "/" + filesCSV.index.astype('str')
filePaths = filesCSV.index.to_numpy()
else:
filesCSV = None
# List all accelerometer files under accDir/
filePaths = []
accExt = accExt.lower()
for root, dirs, files in os.walk(accDir):
for file in files:
if file.lower().endswith((accExt,
accExt + ".gz",
accExt + ".zip",
accExt + ".bz2",
accExt + ".xz")):
filePaths.append(os.path.join(root, file))
with open(cmdsFile, 'w') as f:
for filePath in filePaths:
# Use the file name as the output folder name for the process,
# keeping the same directory structure of accDir/
# Example: If filePath is {accDir}/group0/subject123.cwa then
# outputFolder will be {outDir}/group0/subject123/
outputFolder = filePath.replace(accDir.rstrip("/"), outDir.rstrip("/")).split(".")[0]
cmd = f"accProcess {filePath} --outputFolder {outputFolder} {cmdOptions}"
if filesCSV is not None:
# Grab additional options provided in filesCSV (e.g. calibration params)
cmdOptionsCSV = ' '.join(['--{} {}'.format(col, filesCSV.loc[filePath, col])
for col in filesCSV.columns])
cmd += " " + cmdOptionsCSV
f.write(cmd)
f.write('\n')
print('Processing list written to ', cmdsFile)
def collateSummary(resultsDir, outputCsvFile="all-summary.csv"):
"""Read all *-summary.json files under <resultsDir> and merge into one CSV file
Each json file represents summary data for one participant.
Therefore output CSV file contains summary for all participants.
:param str resultsDir: Directory containing JSON files
:param str outputCsvFile: Output CSV filename
:return: New file written to <outputCsvFile>
:rtype: void
:Example:
>>> import accUtils
>>> accUtils.collateSummary("data/", "data/all-summary.csv")
<summary CSV of all participants/files written to "data/all-summary.csv">
"""
# Load all *-summary.json files under resultsDir/
jdicts = []
for root, dirs, files in os.walk(resultsDir):
for file in files:
if file.lower().endswith("-summary.json"):
with open(os.path.join(root, file), 'r') as f:
jdicts.append(json.load(f, object_pairs_hook=OrderedDict))
summary = | pd.DataFrame.from_dict(jdicts) | pandas.DataFrame.from_dict |
import glob
import pandas as pd
import sys
files = sys.argv[1]
out_file = sys.argv[2]
data_frame = pd.read_csv(files.split(',')[0],sep='\t')
for file in files.split(',')[1:]:
df1 = | pd.read_csv(file,sep='\t') | pandas.read_csv |
#-- -- -- -- Intermediate Python
# Used for Data Scientist Training Path
#FYI it's a compilation of how to work
#with different commands.
####### -----> Matplotlib
### --------------------------------------------------------
## Line plot - ex#0
# Print the last item from year and pop
print(year[-1])
print(pop[-1])
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Make a line plot: year on the x-axis, pop on the y-axis
plt.plot(year, pop)
# Display the plot with plt.show()
plt.show()
### --------------------------------------------------------
## Line plot - ex#1
import matplotlib.pyplot as plt
# Print the last item of gdp_cap and life_exp
print(gdp_cap[-1])
print(life_exp[-1])
# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis
plt.plot(gdp_cap, life_exp)
# Display the plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex0
import matplotlib.pyplot as plt
# Change the line plot below to a scatter plot
plt.scatter(gdp_cap, life_exp)
# Put the x-axis on a logarithmic scale
plt.xscale('log')
# Show plot
plt.show()
### --------------------------------------------------------
## Scatter Plot --- ex1
# Import package
import matplotlib.pyplot as plt
# Build Scatter plot
plt.scatter(pop, life_exp)
# Show plot
plt.show()
## HISTOGRAMS
### --------------------------------------------------------
### -> Build a histogram
import matplotlib.pyplot as plt
# Create histogram of life_exp data
plt.hist(life_exp)
# Display histogram
plt.show()
### --------------------------------------------------------
## Build a histogram --- bins
import matplotlib.pyplot as plt
# Build histogram with 5 bins
plt.hist(life_exp, bins=5)
# Show and clean up plot
plt.show()
plt.clf()
# Build histogram with 20 bins
plt.hist(life_exp, bins=20)
# Show and clean up again
plt.show()
plt.clf()
### --------------------------------------------------------
## Build a histogram --- compare
import matplotlib.pyplot as plt
# Histogram of life_exp, 15 bins
plt.hist(life_exp, bins=15)
# Show and clear plot
plt.show()
plt.clf()
# Histogram of life_exp1950, 15 bins
plt.hist(life_exp1950, bins=15)
# Show and clear plot again
plt.show()
plt.clf()
### --------------------------------------------------------
# You're a professor teaching Data Science with Python,
# and you want to visually assess if the grades on
# your exam follow a particular distribution.
# Which plot do you use?
# R/ Histogram
### --------------------------------------------------------
# You're a professor in Data Analytics with Python, and you
# want to visually assess if longer answers on exam
# questions lead to higher grades.
# Which plot do you use?
# Scatter plot
### --------------------------------------------------------
### Labels
import matplotlib.pyplot as plt
# Basic scatter plot, log scale
plt.scatter(gdp_cap, life_exp)
plt.xscale('log')
# Strings
xlab = 'GDP per Capita [in USD]'
ylab = 'Life Expectancy [in years]'
title = 'World Development in 2007'
# Add axis labels
plt.xlabel(xlab)
plt.ylabel(ylab)
# Add title
plt.title(title)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
## Ticks
import matplotlib.pyplot as plt
# Scatter plot
plt.scatter(gdp_cap, life_exp)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
# Definition of tick_val and tick_lab
tick_val = [1000, 10000, 100000]
tick_lab = ['1k', '10k', '100k']
# Adapt the ticks on the x-axis
plt.xticks(tick_val, tick_lab)
# After customizing, display the plot
plt.show()
### --------------------------------------------------------
#Sizes
# Import numpy as np
import numpy as np
# Store pop as a numpy array: np_pop
np_pop = np.array(pop)
# Double np_pop
np_pop = np_pop*2
# Update: set s argument to np_pop
plt.scatter(gdp_cap, life_exp, s = np_pop)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])
# Display the plot
plt.show()
### --------------------------------------------------------
### Colors
import matplotlib.pyplot as plt
# Specify c and alpha inside plt.scatter()
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Show the plot
plt.show()
### --------------------------------------------------------
## Additional Customizations
# Scatter plot
plt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)
# Previous customizations
plt.xscale('log')
plt.xlabel('GDP per Capita [in USD]')
plt.ylabel('Life Expectancy [in years]')
plt.title('World Development in 2007')
plt.xticks([1000,10000,100000], ['1k','10k','100k'])
# Additional customizations
plt.text(1550, 71, 'India')
plt.text(5700, 80, 'China')
# Add grid() call
plt.grid(True)
# Show the plot
plt.show()
### --------------------------------------------------------
#### INTERPRETATION
# If you have a look at your colorful plot,
# it's clear that people live longer in countries with a
# higher GDP per capita. No high income countries have really
# short life expectancy, and no low income countries
# have very long life expectancy. Still, there is a huge
# difference in life expectancy between countries on the same
# income level. Most people live in middle income countries
# where difference in lifespan is huge between countries;
# depending on how income is distributed and how it is used.
# What can you say about the plot?
## R/ The countries in blue, corresponding to Africa, have
# both low life expectancy and a low GDP per capita.
### Dictionaries, Part 1
### --------------------------------------------------------
### --->Motivation for dictionaries
# Definition of countries and capital
countries = ['spain', 'france', 'germany', 'norway']
capitals = ['madrid', 'paris', 'berlin', 'oslo']
# Get index of 'germany': ind_ger
ind_ger = countries.index('germany')
# Use ind_ger to print out capital of Germany
print(capitals[ind_ger])
### --------------------------------------------------------
## Create dictionary
# Definition of countries and capital
countries = ['spain', 'france', 'germany', 'norway']
capitals = ['madrid', 'paris', 'berlin', 'oslo']
# From string in countries and capitals, create dictionary europe
europe = {
'spain':'madrid',
"france":"paris",
"germany":"berlin",
"norway":"oslo"}
# Print europe
print(europe)
### --------------------------------------------------------
## Access dictionary
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Print out the keys in europe
print(europe.keys())
# Print out value that belongs to key 'norway'
print(europe['norway'])
### Dictionaries, Part 2
### --------------------------------------------------------
### ---> Dictionary Manipulation - ex 0
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'berlin', 'norway':'oslo' }
# Add italy to europe
europe["italy"] = 'rome'
# Print out italy in europe
print('italy' in europe)
# Add poland to europe
europe["poland"] = 'warsaw'
# Print europe
print(europe)
### --------------------------------------------------------
### ---> Dictionary Manipulation - ex 1
# Definition of dictionary
europe = {'spain':'madrid', 'france':'paris', 'germany':'bonn',
'norway':'oslo', 'italy':'rome', 'poland':'warsaw',
'australia':'vienna' }
# Update capital of germany
europe['germany'] = 'berlin'
# Remove australia
del(europe['australia'])
# Print europe
print(europe)
### --------------------------------------------------------
## Dictionariception
# Dictionary of dictionaries
europe = { 'spain': { 'capital':'madrid', 'population':46.77 },
'france': { 'capital':'paris', 'population':66.03 },
'germany': { 'capital':'berlin', 'population':80.62 },
'norway': { 'capital':'oslo', 'population':5.084 } }
# Print out the capital of France
print(europe['france']['capital'])
# Create sub-dictionary data
data = {'capital': 'rome', 'population': 59.83}
# Add data to europe under key 'italy'
europe['italy'] = data
# Print europe
print(europe)
### Pandas, Part 1
### --------------------------------------------------------
#### ---->>> Dictionary to DataFrame -- ex#0
# Pre-defined lists
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
# Import pandas as pd
import pandas as pd
# Create dictionary my_dict with three key:value pairs: my_dict
my_dict = {'country': names, 'drives_right': dr, 'cars_per_cap': cpc}
# Build a DataFrame cars from my_dict: cars
cars = pd.DataFrame(my_dict)
# Print cars
print(cars)
### --------------------------------------------------------
#### ---->>> Dictionary to DataFrame -- ex#0
import pandas as pd
# Build cars DataFrame
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
cars_dict = { 'country':names, 'drives_right':dr, 'cars_per_cap':cpc }
cars = pd.DataFrame(cars_dict)
print(cars)
# Definition of row_labels
row_labels = ['US', 'AUS', 'JPN', 'IN', 'RU', 'MOR', 'EG']
# Specify row labels of cars
cars.index = row_labels
# Print cars again
print(cars)
### --------------------------------------------------------
### CSV to DataFrame --- ex#0
# Import pandas as pd
import pandas as pd
# Import the cars.csv data: cars
cars = | pd.read_csv('cars.csv') | pandas.read_csv |
"""
generate paper figures
"""
from __future__ import print_function
import ast
import datetime
import os
import numpy as np
import pandas as pd
from ccdc.cavity import Cavity
from ccdc.io import MoleculeReader
from pipeline import HotspotPipeline
from hotspots.hs_io import HotspotReader
from hotspots.grid_extension import Grid
class Hot(HotspotPipeline):
def _get_ligand_cavity(self):
# self.ligand_cavity = os.path.join(self.working_dir, "ligand_cavity.dat")
# tolerance = 0
# lc = []
# mols = [MoleculeReader(path)[0] for other_id, lig_dic in self.extracted_ligands.items()
# for l, path in lig_dic.items()]
#
# point = [round(np.mean([a.coordinates.x for mol in mols for a in mol.heavy_atoms])),
# round(np.mean([a.coordinates.y for mol in mols for a in mol.heavy_atoms])),
# round(np.mean([a.coordinates.z for mol in mols for a in mol.heavy_atoms]))]
#
# cavs = Cavity.from_pdb_file(self.apo_prep)
#
# for i, c in enumerate(cavs):
#
# mini = c.bounding_box[0]
# maxi = c.bounding_box[1]
# if all([mini.x - tolerance < point[0] < maxi.x + tolerance,
# mini.y - tolerance < point[1] < maxi.y + tolerance,
# mini.z - tolerance < point[2] < maxi.z + tolerance]):
#
# lc.append(int(i))
#
with open(self.ligand_cavity, "r") as f:
lc = f.read().split(",")
        if len(lc) < 1:
            print("no ligand cavity for {}".format(self.apo))
        return lc
def _get_overlap_dic(self, cav, other_id, lig_id):
"""
:param cav:
:param other_id:
:param lig_id:
:return:
"""
if os.path.exists(self.atomic_overlaps[cav][other_id][lig_id]):
with open(self.atomic_overlaps[cav][other_id][lig_id], "r") as f:
x = f.read()
return ast.literal_eval(x)
else:
return None
def _get_vol_data(self, cav, other_id, lig_id):
"""
extract the volume data
:param int cav: cavity identifier
:param str other_id: protein identifier
:param str lig_id: ligand identifier
:return:
"""
if os.path.exists(self.all_overlaps[cav][other_id][lig_id]):
with open(self.all_overlaps[cav][other_id][lig_id], "r") as f:
vol = f.read()
return float(vol)
else:
return 0.0
def _is_top_cavity(self, cav):
"""
determine if top ranked cavity
:param cav:
:return:
"""
if os.path.exists(self.cavity_rank):
with open(self.cavity_rank, "r") as f:
print(self.cavity_rank)
c = f.read()
try:
if int(cav) == int(c):
return True
else:
return False
except:
return "NaN"
else:
return "NaN"
def _max_overlap(self, l):
try:
vals = []
for cav_id, prot_dic in self.all_overlaps.items():
for prot_id, lig_dic in prot_dic.items():
for lig_id, path in lig_dic.items():
if os.path.exists(path) and l == lig_id:
with open(path, 'r') as f:
vals.append(float(f.read()))
return max(vals)
except:
return 0
def _get_cavity_score(self, cav_id):
"""
retrieve cavity score
:param cav_id:
:return:
"""
if os.path.exists(self.cavity_score[cav_id]):
with open(self.cavity_score[cav_id], 'r') as f:
score = float(f.read())
return score
else:
return "NaN"
def analysis(self):
"""
report the volume analysis
:return:
"""
keys = ["apo", "buriedness_method", "cavity_id", "other_id", "ligand_id", "cavity_score",
"top_cavity", "volume_overlap", "atomic_label", "atomic_overlap", "atom_type", "ligand_cavity"]
data = []
self._get_cavities(min_vol=200)
lc = self._get_ligand_cavity()
for cav in range(len(self.cavities)):
v1 = self._is_top_cavity(cav)
score = self._get_cavity_score(cav)
if int(cav) in lc:
lc_bool = True
else:
lc_bool = False
for i, prot_id in enumerate(self.protein_id):
for lig_id in self.ligand_id[i]:
atomic_overlap_dic = self._get_overlap_dic(cav, prot_id, lig_id)
v2 = self._get_vol_data(cav, prot_id, lig_id)
                    if atomic_overlap_dic is None:
pass
else:
for n, atom_dic in atomic_overlap_dic.items():
for label, overlap in atom_dic.items():
data.append(
[self.apo, self.buriedness_method, cav, prot_id, lig_id, score, v1, v2,
label, overlap, n, lc_bool])
return pd.DataFrame(dict(zip(keys, (zip(*data)))))
def table(self):
"""
report the volume analysis
:return:
"""
keys = ["apo", "prot_id", "ligand_id", self.buriedness_method]
data = []
self._get_cavities(min_vol=200)
lc = self._get_ligand_cavity()
if len(lc) == 0:
for i, prot_id in enumerate(self.protein_id):
for lig_id in self.ligand_id[i]:
v2 = self._get_vol_data(0, prot_id, lig_id)
data.append([self.apo, prot_id, lig_id, v2])
else:
for cav in range(len(self.cavities)):
if int(cav) in lc:
for i, prot_id in enumerate(self.protein_id):
for lig_id in self.ligand_id[i]:
v2 = self._get_vol_data(cav, prot_id, lig_id)
data.append([self.apo, prot_id, lig_id, v2])
return pd.DataFrame(dict(zip(keys, (zip(*data)))))
def bcv_effect(self):
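        """
        collect ligand/hotspot overlap values for the BCV result and for the fixed score
        thresholds (10, 14, 17) within the ligand cavity
        :return: parallel lists of ligand id, protein id, ligand overlap, hotspot overlap and step label
        """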
def get_val(path):
if os.path.exists(path):
with open(path, 'r') as f:
val = f.read()
print(path, val.split(","))
return val.split(",")
else:
return None
thresholds = [10, 14, 17]
li = []
pr = []
x = []
y = []
s = []
self._get_cavities(min_vol=200)
for cav in range(len(self.cavities)):
lc = self._get_ligand_cavity()
print(lc)
if int(cav) in lc:
# major volume on X, minor volume on y
for prot_id, lig_dic in self.hot_lig_overlaps[cav].items():
print(lig_dic)
for lig_id, path in lig_dic.items():
li.append(lig_id)
pr.append(prot_id)
x.append(get_val(self.bcv_lig_overlaps[cav][prot_id][lig_id])[0])
y.append(get_val(self.bcv_hot_overlaps[cav][prot_id][lig_id])[0])
s.append('bcv')
for i, t in enumerate(thresholds):
li.append(lig_id)
pr.append(prot_id)
x.append(get_val(self.hot_lig_overlaps[cav][prot_id][lig_id])[i])
y.append(get_val(self.hot_hot_overlaps[cav][prot_id][lig_id])[i])
s.append(str(t))
return li, pr, x, y, s
def cav_time(self):
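        """
        collect superstar/hotspot/bcv timings, reported for the global run and summed over
        the individual cavities
        :return: parallel lists of run label, time and pipeline step
        """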
def get_val(path):
if os.path.exists(path):
with open(path, 'r') as f:
val = f.read()
print(path, val.split(","))
return val.split(",")
else:
return None
cavi = []
time = []
step = []
x = []
y = []
z = []
zmean = []
_cavi = []
_x = []
_y = []
_zmean = []
self._get_cavities(min_vol=200)
for cav in self.superstar_time.keys():
_z = []
if cav == 'global':
cavi.extend(['global'] * 4)
time.append(get_val(self.superstar_time[cav])[0])
step.append('superstar')
time.append(get_val(self.hotspot_time[cav])[0])
step.append('hotspot')
for prot_id, lig_dic in self.hot_lig_overlaps[cav].items():
for lig_id, path in lig_dic.items():
try:
z.append(float(get_val(self.bcv_time[cav][prot_id][lig_id])[0]))
except:
z.append(0)
time.append(np.mean(z))
step.append('bcv')
time.append(float(get_val(self.superstar_time[cav])[0]) +
float(get_val(self.hotspot_time[cav])[0]) +
np.mean(z))
step.append('total')
else:
_x.append(float(get_val(self.superstar_time[cav])[0]))
_y.append(float(get_val(self.hotspot_time[cav])[0]))
for prot_id, lig_dic in self.hot_lig_overlaps[cav].items():
for lig_id, path in lig_dic.items():
try:
_z.append(float(get_val(self.bcv_time[cav][prot_id][lig_id])[0]))
except:
_z.append(0)
_zmean.append(np.mean(_z))
cavi.extend(['cavity'] * 4)
time.append(sum(_x))
step.append('superstar')
time.append(sum(_y))
step.append('hotspot')
time.append(sum(_zmean))
step.append('bcv')
time.append(sum(_x) + sum(_y) + sum(_zmean))
step.append('total')
return cavi, time, step
def main():
prefix = "/vagrant/github_pkgs/hotspots/examples/7_bcv_validation"
buriedness_methods = ['ligsite', 'ghecom', 'ghecom_internal']
df = pd.read_csv("inputs.csv")
frags = set(df['fragment'])
leads = set(df['lead'])
hot_pdbs = set(df['apo'])
reports = []
for i, pdb in enumerate(hot_pdbs):
for method in buriedness_methods:
ligands = list(df.loc[df['apo'] == pdb]['fragment_ID']) + list(df.loc[df['apo'] == pdb]['lead_ID'])
proteins = list(df.loc[df['apo'] == pdb]['fragment']) + list(df.loc[df['apo'] == pdb]['lead'])
hp = Hot(apo=pdb, buriedness_method=method, protein_id=proteins, ligand_id=ligands)
report = hp.analysis()
reports.append(report)
print(report)
dat = | pd.concat(reports, ignore_index=True) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
import numpy as np
import pandas as pd
import utils
import time
from collections import deque, defaultdict
from scipy.spatial.distance import cosine
from scipy import stats
import math
seed = SEED
cur_stage = CUR_STAGE
mode = cur_mode
#used_recall_source = 'i2i_w02-b2b-i2i2i'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b'
used_recall_source = cur_used_recall_source
sum_mode = 'nosum'
used_recall_source = used_recall_source+'-'+sum_mode
print(f'Recall source in use: {used_recall_source}')
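# The feat_* functions below each take the recall-candidate DataFrame (one row per
# (user, item) candidate, with columns such as 'road_item', 'sim_weight', 'loc_weight',
# 'time_weight', 'rank_weight', 'recall_type' and 'index') and return a DataFrame of new
# feature columns aligned with the input rows.
# feat_item_sum_mean_sim_weight_loc_weight_time_weight_rank_weight aggregates the four
# recall weights per (user, item) pair with sum and mean and merges them back onto every
# candidate row.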
def feat_item_sum_mean_sim_weight_loc_weight_time_weight_rank_weight(data):
df = data.copy()
df = df[ ['user','item','sim_weight','loc_weight','time_weight','rank_weight','index'] ]
feat = df[ ['index','user','item'] ]
df = df.groupby( ['user','item'] )[ ['sim_weight','loc_weight','time_weight','rank_weight'] ].agg( ['sum','mean'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['sim_weight','loc_weight','time_weight','rank_weight'] for j in ['sum','mean'] ]
df.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
def feat_sum_sim_loc_time_weight(data):
df = data.copy()
df = df[ ['index','sim_weight','loc_weight','time_weight'] ]
feat = df[ ['index'] ]
feat['sum_sim_loc_time_weight'] = df['sim_weight'] + df['loc_weight'] + df['time_weight']
feat = feat[ ['sum_sim_loc_time_weight'] ]
return feat
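# Text/image similarity features: item_feat maps item_id -> (text_embedding, image_embedding).
# feat_road_item_text_cossim computes the cosine similarity
#     cos(a, b) = a . b / (|a| * |b| + 1e-9)
# between the seed item ('road_item') and the candidate item, returning NaN when either
# embedding is missing; the eulasim/mansim variants use L2 and L1 distances instead.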
def feat_road_item_text_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_text_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_cossim'] ]
return feat
def feat_road_item_text_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text )
return a
else:
return np.nan
feat['road_item_text_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_eulasim'] ]
return feat
def feat_road_item_text_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text, ord=1 )
return a
else:
return np.nan
feat['road_item_text_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_mansim'] ]
return feat
def feat_road_item_image_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
c = np.dot( item1_image, item2_image )
a = np.linalg.norm( item1_image )
b = np.linalg.norm( item2_image )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_image_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_cossim'] ]
return feat
def feat_road_item_image_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image )
return a
else:
return np.nan
feat['road_item_image_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_eulasim'] ]
return feat
def feat_road_item_image_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
        item_image[k] = v[1]  # v[1] is the image embedding (v[0] is the text embedding)
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image, ord=1 )
return a
else:
return np.nan
feat['road_item_image_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_mansim'] ]
return feat
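# feat_i2i_seq does not emit feature columns itself: for every (road_item, item) pair in
# the candidate set it collects the raw co-occurrence records
# (loc1, loc2, t1, t2, len(user_sequence)) from all training click sequences; the
# feat_i2i_* functions below reload this cache from feat_i2i_seq_{mode}_{cur_stage}.pkl.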
def feat_i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
i2i_sim_seq = {}
st0 = time.time()
tot = 0
for user, items in user_item_dict.items():
times = user_time_dict[user]
if tot % 500 == 0:
print( f'tot: {len(user_item_dict)}, now: {tot}' )
tot += 1
for loc1, item in enumerate(items):
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
if (item,relate_item) not in new_keys:
continue
t1 = times[loc1]
t2 = times[loc2]
i2i_sim_seq.setdefault((item,relate_item), [])
i2i_sim_seq[ (item,relate_item) ].append( (loc1, loc2, t1, t2, len(items) ) )
st1 = time.time()
print(st1-st0)
return i2i_sim_seq
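# feat_i2i2i_seq builds a two-hop (item -> bridge item -> item) similarity cache: a
# location/time-weighted i2i matrix is computed first, each item keeps its top-50
# neighbours, and for every candidate pair (item1, item3) the bridging item2 is stored
# with both hop scores under two normalisations (p2: damped by (cnt_i * cnt_j) ** 0.2,
# p1: divided by cnt_i * cnt_j).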
def feat_i2i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:50]:
res[i[0]]=i[1]
sim_item_p2[key] = res
i2i2i_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if item2 == item1:
continue
for item3 in sim_item_p2[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2i_sim_seq.setdefault((item1,item3), [])
i2i2i_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], sim_item_p2[item2][item3],
sim_item_p1[item1][item2], sim_item_p1[item2][item3] ) )
return i2i2i_sim_seq
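# feat_i2i2b_seq is the same two-hop construction as feat_i2i2i_seq, except the second
# hop uses the precomputed blend similarity (top-100 neighbours loaded from
# item_blend_sim_path) instead of the click-based i2i matrix.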
def feat_i2i2b_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:100]:
res[i[0]]=i[1]
sim_item_p2[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:100]:
blend_score[i][j] = cij
i2i2b_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if (item2 == item1) or (item2 not in blend_score.keys()):
continue
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2b_sim_seq.setdefault((item1,item3), [])
i2i2b_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], blend_score[item2][item3],
sim_item_p1[item1][item2], blend_score[item2][item3] ) )
return i2i2b_sim_seq
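# feat_i2i_sim turns the cached co-occurrence records into an itemCF-style score. Each
# record contributes loc_weight * time_weight / log(1 + sequence_length), where
#     loc_weight  = 0.9 ** (|loc1 - loc2| - 1), floored at 0.2
#     time_weight = 1 - 100 * |t1 - t2|,        floored at 0.2
# and the summed score is finally divided by (item_cnt[i] * item_cnt[j]) ** 0.2.
# Worked example: adjacent clicks (|loc1 - loc2| = 1) that are 0.002 time units apart in
# a 10-click session contribute 1.0 * 0.8 / log(11) ~= 0.33 before the popularity damping.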
def feat_i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
for key in new_keys:
if np.isnan( result[key] ):
continue
result[key] = result[key] / ((item_cnt[key[0]] * item_cnt[key[1]]) ** 0.2)
print('Finished getting result')
feat['i2i_sim'] = feat['new_keys'].map(result)
#import pdb
#pdb.set_trace()
#i2i_seq_feat = pd.concat( [feat,i2i_seq_feat], axis=1 )
#i2i_seq_feat['itemAB'] = i2i_seq_feat['road_item'].astype('str') + '-' + i2i_seq_feat['item'].astype('str')
feat = feat[ ['i2i_sim'] ]
return feat
def feat_i2i_sim_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += loc_weight
feat['i2i_sim_abs_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_abs_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
loc_diff = loc1-loc2
loc_weight = (loc_base**loc_diff)
if abs(loc_weight) <= 0.2:
if loc_weight > 0:
loc_weight = 0.2
else:
loc_weight = -0.2
result[key] += loc_weight
feat['i2i_sim_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_abs_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
result[key] += time_weight
feat['i2i_sim_abs_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_sim_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - (t1 - t2) * 100)
if abs(time_weight)<=0.2:
if time_weight > 0:
time_weight = 0.2
else:
time_weight = -0.2
result[key] += time_weight
feat['i2i_sim_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - abs(t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = abs(loc2-loc1)
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
if loc1-loc2>0:
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
else:
result[key] -= 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_mean_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += ( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len) ) / len(records)
feat['i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_sum_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] + item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_sum_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_sum_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_multi_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] * item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_multi_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_multi_weight_'+str(weight) )
feat = feat[ cols ]
return feat
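# feat_b2b_sim looks up the precomputed blend similarity (top-100 neighbours per item,
# loaded from item_blend_sim_path) for each (road_item, item) pair; pairs without an
# entry get NaN.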
def feat_b2b_sim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
blend_sim = utils.load_sim(item_blend_sim_path)
b2b_sim = {}
for item in blend_sim:
i = item[0]
b2b_sim.setdefault(i,{})
for j,cij in item[1][:100]:
b2b_sim[i][j] = cij
vals = feat[ ['road_item','item'] ].values
result = []
for val in vals:
item1 = val[0]
item2 = val[1]
if item1 in b2b_sim.keys():
if item2 in b2b_sim[item1].keys():
result.append( b2b_sim[ item1 ][ item2 ] )
else:
result.append( np.nan )
else:
result.append( np.nan )
feat['b2b_sim'] = result
feat = feat[ ['b2b_sim'] ]
return feat
def feat_itemqa_loc_diff(data):
df = data.copy()
feat = df[ ['index','query_item_loc','road_item_loc'] ]
feat['itemqa_loc_diff'] = feat['road_item_loc'] - feat['query_item_loc']
def func(s):
if s<0:
return -s
return s
feat['abs_itemqa_loc_diff'] = feat['itemqa_loc_diff'].apply(func)
feat = feat[ ['itemqa_loc_diff','abs_itemqa_loc_diff'] ]
return feat
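# feat_sim_three_weight accumulates, for every co-clicked (road_item, item) pair, the
# location weights, time weights and sequence lengths over all user histories, then
# exposes the per-pair sums and means as six feature columns.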
def feat_sim_three_weight(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean'] = feat['com_item_loc_weights_sum'] / feat['com_item_cnt']
feat['com_item_time_weights_mean'] = feat['com_item_time_weights_sum'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum','com_item_time_weights_sum','com_item_record_weights_sum',
'com_item_loc_weights_mean','com_item_time_weights_mean','com_item_record_weights_mean' ] ]
st2 = time.time()
print(st2-st1)
return feat
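# feat_different_type_road_score_sum_mean splits sim_weight by recall source
# (recall_type 0 = i2i, 1 = blend/b2b, 2 = i2i2i) and aggregates each per-source score
# over the user-item pair with sum and mean.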
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
feat['user_item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
for col in ['i2i_score','blend_score','i2i2i_score']:
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user_item',col+'_sum'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user_item',col+'_mean'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_different_type_road_score_sum_mean_new(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
recall_source_names = ['i2i_w02','b2b','i2i2i','i2i_w10','i2i2b']
recall_source_names = [ i+'_score' for i in recall_source_names ]
for idx,col in enumerate(recall_source_names):
feat[col] = feat['sim_weight']
feat.loc[ feat['recall_type']!=idx, col ] = np.nan
for col in recall_source_names:
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user','item',col+'_sum'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user','item',col+'_mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat_list = recall_source_names + [ col+'_sum' for col in recall_source_names ] + [ col+'_mean' for col in recall_source_names ]
feat = feat[ feat_list ]
return feat
def feat_sim_base(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
road_item_cnt = np.zeros( num, dtype=float )
t_item_cnt = np.zeros( num, dtype=float )
com_item_cij = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
road_item_cnt[i] = item_cnt[ road_item[i] ]
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_cij[i] = sim_item[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
road_item_cnt[i] = np.nan
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
if t_item[i] in item_set:
t_item_cnt[i] = item_cnt[ t_item[i] ]
else:
t_item_cnt[i] = np.nan
feat['road_item_cnt'] = road_item_cnt
feat['item_cnt'] = t_item_cnt
feat['com_item_cij'] = com_item_cij
feat['com_item_cnt'] = t_com_item_cnt
feat = feat[ ['road_item_cnt','item_cnt','com_item_cij','com_item_cnt' ] ]
st2 = time.time()
print(st2-st1)
return feat
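# The feat_u2i_* functions below relate the road_item to the query position itself: a
# decayed location weight loc_base ** loc_diff and a time weight 1 - 100 * |t_query - t_road|,
# both floored at 0.1, in absolute and signed variants.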
def feat_u2i_abs_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if loc_weight<=0.1:
loc_weight = 0.1
result.append(loc_weight)
feat['u2i_abs_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if abs(loc_weight)<=0.1:
loc_weight = 0.1
if loc2 < loc1:
loc_weight = -loc_weight
result.append(loc_weight)
feat['u2i_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_abs_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if time_weight<=0.1:
time_weight = 0.1
result.append(time_weight)
feat['u2i_abs_time_weights'] = result
cols = [ 'u2i_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_u2i_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if abs(time_weight)<=0.1:
time_weight = 0.1
if t1 > t2:
time_weight = -time_weight
result.append(time_weight)
feat['u2i_time_weights'] = result
cols = [ 'u2i_time_weights' ]
feat = feat[ cols ]
return feat
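# Count-encoding features: each categorical key (or concatenation of keys) is mapped to
# its frequency in the candidate table via value_counts().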
def feat_automl_cate_count(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'road_item','item','road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_automl_user_cate_count(data):
df = data.copy()
feat = df[ ['index','user','road_item','item'] ]
feat['user-road_item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str')
feat['user-item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
feat['user-road_item-item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'user-road_item','user-item','user-road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
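# feat_u2i_road_item_time_diff computes, per user and history position, the time gap to
# the previous click (diff(1)) and to the next click (diff(-1)) and merges both deltas
# back onto every candidate row.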
def feat_u2i_road_item_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time']]
feat = feat.groupby(['user','road_item_loc']).first().reset_index()
feat_group = feat.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_group['road_item_time'].diff(1)
feat2 = feat_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_time_diff_history'
feat2.name = 'u2i_road_item_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2],axis=1),how='left',on=['user','road_item_loc'])
cols = [ 'u2i_road_item_time_diff_history', 'u2i_road_item_time_diff_future' ]
feat = feat[ cols ]
return feat
def feat_road_item_text_dot(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
return c
else:
return np.nan
feat['road_item_text_dot'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_dot'] ]
return feat
def feat_road_item_text_norm2(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func1(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return a*b
else:
return np.nan
def func2(ss):
item1 = ss
if ( item1 in item_text ):
item1_text = item_text[item1]
a = np.linalg.norm( item1_text )
return a
else:
return np.nan
feat['road_item_text_product_norm2'] = df[ ['road_item','item'] ].apply(func1, axis=1)
feat['road_item_text_norm2'] = df['road_item'].apply(func2)
feat['item_text_norm2'] = df['item'].apply(func2)
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
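# feat_automl_cate_count_all_1/2/3 count-encode the single fields, every pair and every
# triple of {user, item, road_item, road_item_loc, query_item_loc, recall_type, loc_diff},
# one interaction order per function.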
def feat_automl_cate_count_all_1(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
feat[cate1+'_count_'] = feat[cate1].map( feat[cate1].value_counts() )
cols.append( cate1+'_count_' )
        print(f'feat {cate1} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_2(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
name2 = f'{cate1}_{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count_' )
            print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_3(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
for c in range(b+1,n):
cate3 = categories[c]
name3 = f'{cate1}_{cate2}_{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count_' )
                print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
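# feat_time_window_cate_count counts how many clicks each candidate item received inside
# a time window around the candidate's own timestamp (symmetric, left-only and right-only
# windows for several deltas). The function is defined twice; the second definition below
# is the one that is actually used at import time.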
def feat_time_window_cate_count(data):
if mode=='valid':
all_train_data = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
all_train_data = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_with_time = all_train_data[["item_id", "time"]].sort_values(["item_id", "time"])
item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
utils.dump_pickle(item2time, item2time_path.format(mode))
item2times = utils.load_pickle(item2time_path.format(mode))
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
]]
def feat_time_window_cate_count(data):
    # Run item2time.py once before building this feature
try:
item2times = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("做这个特征之前,先做一次item2time.py")
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
feat["item_cnt_around_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.07))
feat["item_cnt_before_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.07))
feat["item_cnt_after_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.07))
feat["item_cnt_around_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.1))
feat["item_cnt_before_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.1))
feat["item_cnt_after_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.1))
feat["item_cnt_around_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.15))
feat["item_cnt_before_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.15))
feat["item_cnt_after_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.15))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
# Within the recall set, count how many times this item was recalled in a time window around qtime
# item2times is built differently here; the rest of the logic is unchanged
def item_recall_cnt_around_qtime(data):
item2times = data.groupby("item")["time"].agg(list).to_dict()
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
new_cols = []
new_col_name = "item_recall_cnt_{}_time_{}"
for delta in [0.01, 0.02, 0.05, 0.07, 0.1, 0.15]:
print('running delta: ', delta)
for mode in ["all", "left", "right"]:
new_col = new_col_name.format(mode, delta)
new_cols.append(new_col)
feat[new_col] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode=mode, delta=delta))
return feat[new_cols]
def feat_automl_recall_type_cate_count(data):
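    # Count encodings of recall_type crossed with item, road_item and the
    # road_item-item pair.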
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item','road_item-item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
                print(f'feat {cate1} {cate2} {cate3} done')
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc(data):
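    # From the precomputed i2i co-occurrence sequences, build per (road_item, item) pair:
    # counts and mean time gaps of adjacent co-clicks (location difference == 1), split into
    # history/future, plus the top-k location/time-decayed cij weights ordered by loc diff.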
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_topk_by_loc = {}
result_history_loc_diff1_cnt = {}
result_future_loc_diff1_cnt = {}
result_history_loc_diff1_time_mean = {}
result_future_loc_diff1_time_mean = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
result_history_loc_diff1_cnt[key] = 0.0
result_future_loc_diff1_cnt[key] = 0.0
result_history_loc_diff1_time_mean[key] = 0
result_future_loc_diff1_time_mean[key] = 0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result_history_loc_diff1_cnt[key] += 1
result_history_loc_diff1_time_mean[key] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result_future_loc_diff1_cnt[key] += 1
result_future_loc_diff1_time_mean[key] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)))
result_history_loc_diff1_time_mean[key] /=(result_history_loc_diff1_cnt[key]+1e-5)
result_future_loc_diff1_time_mean[key] /=(result_future_loc_diff1_cnt[key]+1e-5)
result_one = sorted(result[key],key=lambda x:x[0])
result_one_len = len(result_one)
result_topk_by_loc[key] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
feat['history_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_history_loc_diff1_time_mean).fillna(0)
feat['future_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_future_loc_diff1_time_mean).fillna(0)
feat['history_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_history_loc_diff1_cnt).fillna(0)
feat['future_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_future_loc_diff1_cnt).fillna(0)
feat_top = []
for key,value in result_topk_by_loc.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
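    # Median, mean and top-k (largest) location/time-decayed i2i cij weights for each
    # (road_item, item) pair, taken from the precomputed i2i co-occurrence sequences.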
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_median = {}
result_mean = {}
result_topk = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result[key],reverse=True)
result_one_len = len(result_one)
result_median[key] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result_mean[key] = sum(result[key])/len(result[key])
result_topk[key] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
feat['i2i_cijs_median'] = feat['new_keys'].map(result_median)
feat['i2i_cijs_mean'] = feat['new_keys'].map(result_mean)
feat_top = []
for key,value in result_topk.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_different_type_road_score_sum_mean_by_item(data):
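    # Per-item sum and mean of the recall similarity weight, computed separately for the
    # i2i, blend and i2i2i recall types.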
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#,'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].sum().reset_index()
df[col+'_by_item_sum'] = df[col]
df = df[ ['item',col+'_by_item_sum'] ]
feat = pd.merge( feat, df, on='item', how='left')
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].mean().reset_index()
df[col+'_by_item_mean'] = df[col]
df = df[ ['item',col+'_by_item_mean'] ]
feat = pd.merge( feat, df, on='item', how='left')
feat = feat[[f'{i}_by_item_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_different_type_road_score_mean_by_road_item(data):
df = data.copy()
feat = df[ ['user','road_item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['road_item',col,'index'] ]
df = df.groupby('road_item')[col].mean().reset_index()
df[col+'_by_road_item_mean'] = df[col]
df = df[ ['road_item',col+'_by_road_item_mean'] ]
feat = pd.merge( feat, df, on='road_item', how='left')
feat = feat[[f'{i}_by_road_item_mean' for i in cols]]
return feat
def feat_different_type_road_score_mean_by_loc_diff(data):
df = data.copy()
feat = df[ ['user','index','sim_weight','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['loc_diff',col,'index'] ]
df = df.groupby('loc_diff')[col].mean().reset_index()
df[col+'_by_loc_diff_mean'] = df[col]
df = df[ ['loc_diff',col+'_by_loc_diff_mean'] ]
feat = pd.merge( feat, df, on='loc_diff', how='left')
feat = feat[[f'{i}_by_loc_diff_mean' for i in cols]]
return feat
def feat_different_type_road_score_sum_mean_by_recall_type_and_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].sum().reset_index()
df[col+'_by_item-recall_type_sum'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_sum'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].mean().reset_index()
df[col+'_by_item-recall_type_mean'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_mean'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
feat = feat[[f'{i}_by_item-recall_type_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_base_info_in_stage(data):
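    # Rebuild the i2i similarity statistics separately for every stage of the training log,
    # then attach per (stage, item) aggregates: sum, co-click count, mean of the cij weights
    # and the number of distinct co-clicked items.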
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
#all_train_stage_data = pd.concat( all_train_stage_data.iloc[0:1000], all_train_stage_data.iloc[-10000:] )
df_train_stage = all_train_stage_data
df = data.copy()
feat = df[ ['index','road_item','item','stage'] ]
stage2sim_item = {}
stage2item_cnt = {}
stage2com_item_cnt = {}
for sta in range(cur_stage+1):
df_train = df_train_stage[ df_train_stage['stage']==sta ]
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
stage2sim_item[sta] = sim_item
stage2item_cnt[sta] = item_cnt
stage2com_item_cnt[sta] = com_item_cnt
sta_list = []
itemb_list = []
sum_sim_list = []
count_sim_list = []
mean_sim_list = []
nunique_itema_count_list = []
for sta in range(cur_stage+1):
for key1 in stage2sim_item[sta].keys():
val = 0
count = 0
for key2 in stage2sim_item[sta][key1].keys():
val += stage2sim_item[sta][key1][key2]
count += stage2com_item_cnt[sta][key1][key2]
sta_list.append( sta )
itemb_list.append( key1 )
sum_sim_list.append( val )
count_sim_list.append( count )
mean_sim_list.append( val/count )
nunique_itema_count_list.append( len( stage2sim_item[sta][key1].keys() ) )
data1 = pd.DataFrame( {'stage':sta_list, 'item':itemb_list, 'sum_sim_in_stage':sum_sim_list, 'count_sim_in_stage':count_sim_list,
'mean_sim_in_stage':mean_sim_list, 'nunique_itema_count_in_stage':nunique_itema_count_list } )
'''
sta_list = []
item_list = []
cnt_list = []
for sta in range(cur_stage+1):
for key1 in stage2item_cnt[sta].keys():
sta_list.append(sta)
item_list.append(key1)
cnt_list.append( stage2item_cnt[sta][key1] )
data2 = pd.DataFrame( {'stage':sta_list, 'road_item':item_list, 'stage_road_item_cnt':cnt_list } )
data3 = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'stage_item_cnt':cnt_list } )
'''
#feat = pd.merge( feat,data1, how='left',on=['stage','road_item','item'] )
#feat = pd.merge( feat,data2, how='left',on=['stage','road_item'] )
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat = feat[ ['sum_sim_in_stage','count_sim_in_stage','mean_sim_in_stage','nunique_itema_count_in_stage'] ]
return feat
def feat_item_time_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','stage','time'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','item_id'] )['time'].agg( ['max','min','mean'] ).reset_index()
data1.columns = [ 'stage','item','time_max_in_stage','time_min_in_stage','time_mean_in_stage' ]
data1['time_dura_in_stage'] = data1['time_max_in_stage'] - data1['time_min_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat['time_diff_min_in_stage'] = feat['time'] - feat['time_min_in_stage']
feat['time_diff_max_in_stage'] = feat['time_max_in_stage'] - feat['time']
cols = [ 'time_dura_in_stage','time_max_in_stage','time_min_in_stage','time_mean_in_stage','time_diff_min_in_stage','time_diff_max_in_stage' ]
feat = feat[ cols ]
return feat
def feat_user_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data1.name = 'user_count_in_stage'
data1 = data1.reset_index()
data1 = data1.rename( columns={'user_id':'user'} )
data2 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].nunique()
data2.name = 'item_nunique_in_stage'
data2 = data2.reset_index()
data2 = data2.rename( columns={'item_id':'item'} )
data3 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].count()
data3.name = 'item_count_in_stage'
data3 = data3.reset_index()
data3 = data3.rename( columns={'item_id':'item'} )
data3[ 'item_ratio_in_stage' ] = data3[ 'item_count_in_stage' ] / data2['item_nunique_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','user'] )
feat = pd.merge( feat,data2, how='left',on=['stage','item'] )
feat = pd.merge( feat,data3, how='left',on=['stage','item'] )
cols = [ 'user_count_in_stage','item_nunique_in_stage','item_ratio_in_stage' ]
feat = feat[ cols ]
return feat
def feat_item_com_cnt_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["road_item", "stage"]]
feat["head"] = feat.set_index(["road_item", "stage"]).index
feat["itema_cnt_in_stage"] = feat["head"].map(item_stage_cnt)
return feat[["itema_cnt_in_stage"]]
def item_cnt_in_stage2(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
return feat[["item_stage_cnt"]]
def feat_item_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["item"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
item_stage_cnt = train_stage_data.groupby(['item_id'])['index'].count()
item_stage_cnt.name = f"item_stage_cnt_{sta}"
item_stage_cnt = item_stage_cnt.reset_index()
item_stage_cnt.columns = ['item',f"item_stage_cnt_{sta}"]
feat = pd.merge( feat,item_stage_cnt,how='left',on='item' )
cols.append( f"item_stage_cnt_{sta}" )
#import pdb
#pdb.set_trace()
return feat[ cols ]
def feat_user_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["user"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
user_stage_cnt = train_stage_data.groupby(['user_id'])['index'].count()
user_stage_cnt.name = f"user_stage_cnt_{sta}"
user_stage_cnt = user_stage_cnt.reset_index()
user_stage_cnt.columns = ['user',f"user_stage_cnt_{sta}"]
feat = pd.merge( feat,user_stage_cnt,how='left',on='user' )
cols.append( f"user_stage_cnt_{sta}" )
#import pdb
#pdb.set_trace()
return feat[ cols ]
def feat_user_and_item_count_in_three_init_data(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
df_train_stage = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
df_train_stage = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
data1 = df_train_stage.groupby( ['stage','item_id'] )['index'].count()
data1.name = 'in_stage_item_count'
data1 = data1.reset_index()
data1 = data1.rename( columns = {'item_id':'item'} )
data2 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data2.name = 'in_stage_user_count'
data2 = data2.reset_index()
data2 = data2.rename( columns = {'user_id':'user'} )
data3 = df_train_stage.groupby( ['item_id'] )['index'].count()
data3.name = 'no_in_stage_item_count'
data3 = data3.reset_index()
data3 = data3.rename( columns = {'item_id':'item'} )
data4 = df_train_stage.groupby( ['user_id'] )['index'].count()
data4.name = 'no_in_stage_user_count'
data4 = data4.reset_index()
data4 = data4.rename( columns = {'user_id':'user'} )
data5 = df_train.groupby( ['item_id'] )['index'].count()
data5.name = 'no_stage_item_count'
data5 = data5.reset_index()
data5 = data5.rename( columns = {'item_id':'item'} )
data6 = df_train.groupby( ['user_id'] )['index'].count()
data6.name = 'no_stage_user_count'
data6 = data6.reset_index()
data6 = data6.rename( columns = {'user_id':'user'} )
feat = pd.merge( feat,data1,how='left',on=['stage','item'] )
feat = pd.merge( feat,data2,how='left',on=['stage','user'] )
feat = pd.merge( feat,data3,how='left',on=['item'] )
feat = pd.merge( feat,data4,how='left',on=['user'] )
feat = pd.merge( feat,data5,how='left',on=['item'] )
feat = pd.merge( feat,data6,how='left',on=['user'] )
cols = [ 'in_stage_item_count','in_stage_user_count','no_in_stage_item_count','no_in_stage_user_count','no_stage_item_count','no_stage_user_count' ]
return feat[ cols ]
#def feat_item_count_in_three_init_data(data):
def feat_i2i2i_sim(data):
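    # Aggregate the precomputed i2i2i paths per (road_item, item) pair: number of middle
    # items, mean products of the two hop scores, and mean popularity of the middle item.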
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2i_sim_seq')
i2i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2i_sim_seq.keys():
continue
records = i2i2i_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
        result[i,1]/=(result[i,0]+1e-9)
        result[i,2]/=(result[i,0]+1e-9)
        result[i,3]/=(result[i,0]+1e-9)
print('Finished getting result')
cols = ['i2i2i_road_cnt','i2i2i_score1_mean','i2i2i_score2_mean','i2i2i_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_i2i2b_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2b_sim_seq')
i2i2b_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2b_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2b_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2b_sim_seq.keys():
continue
records = i2i2b_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
        result[i,1]/=(result[i,0]+1e-9)
        result[i,2]/=(result[i,0]+1e-9)
        result[i,3]/=(result[i,0]+1e-9)
print('Finished getting result')
cols = ['i2i2b_road_cnt','i2i2b_score1_mean','i2i2b_score2_mean','i2i2b_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_numerical_groupby_item_cnt_in_stage(data):
df = data.copy()
num_cols = [ 'sim_weight', 'loc_weight', 'time_weight', 'rank_weight' ]
cate_col = 'item_stage_cnt'
feat = df[ ['index','road_item','item'] ]
feat1 = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'item_cnt_in_stage2_{mode}_{cur_stage}.pkl') )
df[ cate_col ] = feat1[ cate_col ]
feat[ cate_col ] = feat1[ cate_col ]
cols = []
for col in num_cols:
t = df.groupby(cate_col)[col].agg( ['mean','max','min'] )
cols += [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t.columns = [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t = t.reset_index()
feat = pd.merge( feat, t, how='left', on=cate_col )
return feat[ cols ]
#i2i_score,
#
def feat_item_stage_nunique(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_nunique = all_train_stage_data.groupby(["item_id"])["stage"].nunique()
feat = data[["item"]]
feat["item_stage_nunique"] = feat["item"].map(item_stage_nunique)
return feat[["item_stage_nunique"]]
def feat_item_qtime_time_diff(data):
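    # Time gap between the query time and the item's nearest click in the training log,
    # looking backwards (history) and forwards (future).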
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result_history = np.zeros(df_v.shape[0])*np.nan
result_future = np.zeros(df_v.shape[0])*np.nan
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = [0]+item_time_list[df_v[i,0]]+[1]
for j in range(1,len(time_list)):
if time<time_list[j]:
result_future[i] = time_list[j]-time
result_history[i] = time-time_list[j-1]
break
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = item_time_list[df_v[i,0]]+[1]
for j in range(len(time_list)):
if time<time_list[j]:
result[i] = j
break
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
def feat_road_time_bins_cate_cnt(data):
df = data.copy()
categoricals = ['item','road_item','user','recall_type']
feat = df[['road_item_time']+categoricals]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categoricals.append('loc_diff')
feat['road_time_bins'] = pd.Categorical(pd.cut(feat['road_item_time'],100)).codes
cols = []
for cate in categoricals:
cnt = feat.groupby([cate,'road_time_bins']).size()
cnt.name = f'{cate}_cnt_by_road_time_bins'
cols.append(cnt.name)
feat = feat.merge(cnt,how='left',on=[cate,'road_time_bins'])
return feat[cols]
def feat_time_window_cate_count(data):
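    # Vectorized re-implementation of the time-window click counts; being defined later,
    # it shadows the earlier definitions of the same name.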
    # Run item2time.py once before building this feature
import time as ti
t = ti.time()
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
del df
try:
item_time_list = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("做这个特征之前,先做一次item2time.py")
delta_list = np.array(sorted([0.01, 0.02, 0.05, 0.07, 0.1, 0.15]))
delta_list2 = delta_list[::-1]
delta_n = delta_list.shape[0]
n = delta_n*2+1
result_tmp = np.zeros((df_v.shape[0],n))
result_equal = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = np.ones(n)*df_v[i,1]
time[:delta_n] -= delta_list2
time[-delta_n:] += delta_list
time_list = item_time_list[df_v[i,0]]+[10]
k = 0
for j in range(len(time_list)):
while k<n and time[k]<time_list[j] :
result_tmp[i,k] = j
k += 1
if time[delta_n]==time_list[j]:
result_equal[i] += 1
result_tmp[i,k:] = j
if i%100000 == 0:
print(f'[{i}/{df_v.shape[0]}]:time {ti.time()-t:.3f}s')
t = ti.time()
result = np.zeros((df_v.shape[0],delta_n*3))
for i in range(delta_n):
result[:,i*3+0] = result_tmp[:,delta_n] - result_tmp[:,i]
result[:,i*3+1] = result_tmp[:,-(i+1)] - result_tmp[:,delta_n] + result_equal
result[:,i*3+2] = result_tmp[:,-(i+1)] - result_tmp[:,i]
cols = [f'item_cnt_{j}_time_{i}' for i in delta_list2 for j in ['before','after','around']]
result = pd.DataFrame(result,columns=cols)
result = result[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
return result
def feat_road_item_text_cossim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.multiply(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:],item_np[feat['item'][i*batch_size:(i+1)*batch_size],:]).sum(axis=1)
result = np.divide(result,item_l2[feat['road_item']]*item_l2[feat['item']]+1e-9)
feat['road_item_text_cossim'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_cossim'] = np.nan
return feat[['road_item_text_cossim']]
def feat_road_item_text_eulasim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.linalg.norm(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:]-item_np[feat['item'][i*batch_size:(i+1)*batch_size],:],axis=1)
feat['road_item_text_eulasim'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_eulasim'] = np.nan
return feat[['road_item_text_eulasim']]
def feat_road_item_text_dot(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.multiply(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:],item_np[feat['item'][i*batch_size:(i+1)*batch_size],:]).sum(axis=1)
feat['road_item_text_dot'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_dot'] = np.nan
return feat[['road_item_text_dot']]
def feat_road_item_text_norm2(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros((n,3))
result[:,1] = item_l2[feat['road_item']]
result[:,2] = item_l2[feat['item']]
result[:,0] = result[:,1]*result[:,2]
feat['road_item_text_product_norm2'] = result[:,0]
feat['road_item_text_norm2'] = result[:,1]
feat['item_text_norm2'] = result[:,2]
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_product_norm2'] = np.nan
feat.loc[(~feat['road_item'].isin(item_feat.keys())),'road_item_text_norm2'] = np.nan
feat.loc[(~feat['item'].isin(item_feat.keys())),'item_text_norm2'] = np.nan
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)) )
result[i,1]/=(result[i,3]+1e-5)
result[i,0]/=(result[i,2]+1e-5)
result_one = sorted(result_one,key=lambda x:x[0])
result_one_len = len(result_one)
result[i,4:] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
#result[i] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)) )
result[i,1]/=(result[i,3]+1e-5)
result[i,0]/=(result[i,2]+1e-5)
result_one = sorted(result_one,key=lambda x:x[0])
result_one_len = len(result_one)
result[i,4:] = [x[1] for x in result_one[:topk]] + [np.nan]*max(0,topk-result_one_len)
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
#median,mean,topk
result = np.zeros((len(new_keys),2+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result_one,reverse=True)
result_one_len = len(result_one)
result[i,0] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result[i,1] = sum(result_one)/(len(result_one))
result[i,2:] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
df = feat[ ['index','user','item','i2i_score','blend_score','i2i2i_score'] ]
df = df.groupby( ['user','item'] )[ ['i2i_score','blend_score','i2i2i_score'] ].agg( ['sum','mean'] ).reset_index()
df.columns = ['user','item'] + [ f'{i}_{j}' for i in ['i2i_score','blend_score','i2i2i_score'] for j in ['sum','mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
tmp = feat.groupby(['recall_type','road_item','item']).size()
tmp.name = 'recall_type-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['recall_type','road_item','item'])
cols.append(tmp.name)
    print('feat recall_type road_item item done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
            print(f'feat {cate1} {cate2} done')
tmp = feat.groupby(['loc_diff','road_item','item']).size()
tmp.name = 'loc_diff-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['loc_diff','road_item','item'])
cols.append(tmp.name)
    print('feat loc_diff road_item item done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
                print(f'feat {cate1} {cate2} {cate3} done')
tmp = feat.groupby(['user','recall_type','road_item','item']).size()
tmp.name = 'user-recall_type-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['user','recall_type','road_item','item'])
cols.append(tmp.name)
    print('feat user recall_type road_item item done')
feat = feat[ cols ]
return feat
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
for i,v in item_time_list.items():
item_time_list[i] = np.array(v+[1])
df = data.copy()
feat = df[['index','item','query_item_time']]
tmp = feat.set_index('item')
tmp = tmp.sort_values('query_item_time')
tmp = tmp.groupby(['item']).apply(np.array)
result = np.zeros(df.shape[0])
for i,v in tmp.items():
time_list = item_time_list[i]
k = 0
item_n = v.shape[0]
for j in range(len(time_list)):
while k<item_n and v[k,1]<time_list[j]:
result[int(v[k,0])] = j
k += 1
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
def feat_item_qtime_time_diff(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
for i,v in item_time_list.items():
item_time_list[i] = np.array([0]+v+[1])
df = data.copy()
feat = df[['index','item','query_item_time']]
tmp = feat.set_index('item')
tmp = tmp.sort_values('query_item_time')
tmp = tmp.groupby(['item']).apply(np.array)
result_history = np.zeros(df.shape[0])*np.nan
result_future = np.zeros(df.shape[0])*np.nan
for i,v in tmp.items():
time_list = item_time_list[i]
k = 0
item_n = v.shape[0]
for j in range(1,len(time_list)):
while k<item_n and v[k,1]<time_list[j]:
result_future[int(v[k,0])] = time_list[j]-v[k,1]
result_history[int(v[k,0])] = v[k,1]-time_list[j-1]
k += 1
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
def feat_sim_three_weight_no_clip(data):
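    # Sum and mean of the location/time decay weights over co-click records for each
    # (road_item, item) pair, without clipping the weights at 0.2 as done for the i2i
    # weights elsewhere in this file.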
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
else:
time_weight = (1 - (t2 - t1) * 100)
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum_no_clip'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum_no_clip'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean_no_clip'] = feat['com_item_loc_weights_sum_no_clip'] / feat['com_item_cnt']
feat['com_item_time_weights_mean_no_clip'] = feat['com_item_time_weights_sum_no_clip'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum_no_clip','com_item_time_weights_sum_no_clip',
'com_item_loc_weights_mean_no_clip','com_item_time_weights_mean_no_clip', ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_u2i_road_item_before_and_after_query_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time','query_item_time']]
feat_h = feat.loc[feat['road_item_time']<feat['query_item_time']]
feat_f = feat.loc[feat['road_item_time']>feat['query_item_time']]
feat_h = feat_h.groupby(['user','road_item_loc']).first().reset_index()
feat_f = feat_f.groupby(['user','road_item_loc']).first().reset_index()
feat_h_group = feat_h.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat_f_group = feat_f.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
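    # Within each user, diff(1) is the time gap to the previous road item and diff(-1)
    # the (negative) gap to the next one, computed separately for road items clicked
    # before ("_h") and after ("_f") the query time.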
feat1 = feat_h_group['road_item_time'].diff(1)
feat2 = feat_h_group['road_item_time'].diff(-1)
feat3 = feat_f_group['road_item_time'].diff(1)
feat4 = feat_f_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_before_query_time_diff_history'
feat2.name = 'u2i_road_item_before_query_time_diff_future'
feat3.name = 'u2i_road_item_after_query_time_diff_history'
feat4.name = 'u2i_road_item_after_query_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2,feat3,feat4],axis=1),how='left',on=['user','road_item_loc'])
cols = ['u2i_road_item_before_query_time_diff_history',
'u2i_road_item_before_query_time_diff_future',
'u2i_road_item_after_query_time_diff_history',
'u2i_road_item_after_query_time_diff_future']
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc_new(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4))
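    # result columns: 0/1 sum the time gaps of co-occurrences whose location distance
    # is exactly 1 (history / future direction), 2/3 count them; the sums are turned
    # into means below. The clipped time/loc weights computed in the loop are not
    # used by this feature.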
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[:,1]/=(result[:,3]+1e-5)
result[:,0]/=(result[:,2]+1e-5)
cols = ['history_loc_diff1_com_item_time_mean_new',
'future_loc_diff1_com_item_time_mean_new',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ ['history_loc_diff1_com_item_time_mean_new','future_loc_diff1_com_item_time_mean_new'] ]
return feat
def feat_items_list_len(data):
df = data.copy()
feat = df[ ['index','user','left_items_list','right_items_list','stage'] ]
def func(s):
return len(s)
tdata = feat.groupby('user').first()
tdata['left_items_list_len'] = tdata['left_items_list'].apply( func )
tdata['right_items_list_len'] = tdata['right_items_list'].apply( func )
    # assumed completion: attach the per-user list lengths to every candidate row
    feat = feat.merge( tdata[['left_items_list_len','right_items_list_len']].reset_index(), how='left', on='user' )
    return feat[ ['left_items_list_len','right_items_list_len'] ]
def feat_item_cnt_in_stage2_mean_max_min_by_user(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
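    # item_stage_cnt maps (item_id, stage) -> click count in that stage; it is looked
    # up per candidate row and then aggregated (mean/max/min) over each user's candidates.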
feat = data[["user","item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
tmp = feat.groupby('user')['item_stage_cnt'].agg(['mean','max','min'])
tmp.columns = [f'item_cnt_in_stage2_{i}_by_user' for i in tmp.columns]
feat = feat.merge(tmp,how='left',on='user')
return feat[tmp.columns]
def feat_item_seq_sim_cossim_text(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
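    # Text embeddings are L2-normalised, so the batched dot products below are cosine
    # similarities between the candidate item and every item in the user's left/right
    # sequences; ids missing from item_feat keep a zero vector and contribute 0.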
batch_size = 10000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_textsim_max','left_allitem_item_textsim_sum'])
df_left['left_allitem_textsim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_textsim_max','right_allitem_item_textsim_sum'])
df_right['right_allitem_textsim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_textsim_max'] = feat[['left_allitem_item_textsim_max','right_allitem_item_textsim_max']].max(axis=1)
feat['allitem_item_textsim_sum'] = feat[['left_allitem_item_textsim_sum','right_allitem_item_textsim_sum']].sum(axis=1)
feat['allitem_item_textsim_len'] = feat[['left_allitem_textsim_len','right_allitem_textsim_len']].sum(axis=1)
feat['allitem_item_textsim_mean'] = feat['allitem_item_textsim_sum']/(feat['allitem_item_textsim_len']+1e-9)
return feat[['allitem_item_textsim_max','allitem_item_textsim_mean']]
def feat_item_seq_sim_cossim_image(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[1]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 10000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_imagesim_max','left_allitem_item_imagesim_sum'])
df_left['left_allitem_imagesim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_imagesim_max','right_allitem_item_imagesim_sum'])
df_right['right_allitem_imagesim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_imagesim_max'] = feat[['left_allitem_item_imagesim_max','right_allitem_item_imagesim_max']].max(axis=1)
feat['allitem_item_imagesim_sum'] = feat[['left_allitem_item_imagesim_sum','right_allitem_item_imagesim_sum']].sum(axis=1)
feat['allitem_item_imagesim_len'] = feat[['left_allitem_imagesim_len','right_allitem_imagesim_len']].sum(axis=1)
feat['allitem_item_imagesim_mean'] = feat['allitem_item_imagesim_sum']/(feat['allitem_item_imagesim_len']+1e-9)
return feat[['allitem_item_imagesim_max','allitem_item_imagesim_mean']]
def feat_i2i_sim_on_hist_seq(data):
# get i2i similarities dict
    # not used
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
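    # Classic item-CF weighting: each ordered co-occurrence adds
    # loc_weight * time_weight / log(1 + len(items)), with both weights clipped at 0.2.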
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
print("compute i2i sim end.")
max_i2i_sim_arr = np.zeros(len(data))
mean_i2i_sim_arr = np.zeros(len(data))
    # could also compute statistics over the past N clicks or over a recent time window
for i, (left_seq, right_seq, item) in enumerate(zip(data["left_items_list"].values, data["right_items_list"].values, data["item"].values)):
if i % 100000 == 0:
print("{} in length {}".format(i, len(data)))
seq_i2i_sim = []
for h_item in left_seq + right_seq:
sim_item[h_item].setdefault(item, 0)
seq_i2i_sim.append(sim_item[h_item][item])
        max_i2i_sim_arr[i] = max(seq_i2i_sim) if len(seq_i2i_sim) > 0 else np.nan
        mean_i2i_sim_arr[i] = sum(seq_i2i_sim) / len(seq_i2i_sim) if len(seq_i2i_sim) > 0 else np.nan
feat = data[["item"]]
feat["max_i2i_sim_arr"] = max_i2i_sim_arr
feat["mean_i2i_sim_arr"] = mean_i2i_sim_arr
return feat[[
"max_i2i_sim_arr", "mean_i2i_sim_arr"
]]
def feat_item_seq_sim_cossim_text(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 30000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
len_max_nums = 300
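    # Histories longer than len_max_nums are truncated below to bound the size of the
    # (batch_size, max_len, 128) similarity tensor.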
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_textsim_max','left_allitem_item_textsim_sum'])
df_left['left_allitem_textsim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
len_max_nums = 80
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_textsim_max','right_allitem_item_textsim_sum'])
df_right['right_allitem_textsim_len'] = right_result_len
df_left = df_left.sort_index()
df_right = df_right.sort_index()
feat = pd.concat([df_left,df_right],axis=1)
feat['allitem_item_textsim_max'] = feat[['left_allitem_item_textsim_max','right_allitem_item_textsim_max']].max(axis=1)
feat['allitem_item_textsim_sum'] = feat[['left_allitem_item_textsim_sum','right_allitem_item_textsim_sum']].sum(axis=1)
feat['allitem_item_textsim_len'] = feat[['left_allitem_textsim_len','right_allitem_textsim_len']].sum(axis=1)
feat['allitem_item_textsim_mean'] = feat['allitem_item_textsim_sum']/(feat['allitem_item_textsim_len']+1e-9)
return feat[['allitem_item_textsim_max','allitem_item_textsim_mean']]
def feat_item_seq_sim_cossim_image(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[1]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 30000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
len_max_nums = 300
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_imagesim_max','left_allitem_item_imagesim_sum'])
df_left['left_allitem_imagesim_len'] = left_result_len
feat['right_len'] = feat['right_items_list'].apply(len)
feat_right = feat.sort_values('right_len')
feat_right_len = feat_right['right_len'].values
feat_right_items_list = feat_right['right_items_list'].values
feat_right_items = feat_right['item'].values
right_result = np.zeros((len(feat_right),2))
right_result_len = np.zeros(len(feat_right))
len_max_nums = 80
for i in range(batch_num):
cur_batch_size = len(feat_right_len[i*batch_size:(i+1)*batch_size])
max_len = feat_right_len[i*batch_size:(i+1)*batch_size].max()
max_len = min(max(max_len,1),len_max_nums)
right_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_right_items_list[i*batch_size:(i+1)*batch_size]):
arr = arr[:len_max_nums]
right_items[j][:len(arr)] = arr
right_result_len[i*batch_size:(i+1)*batch_size] = np.isin(right_items,all_items).sum(axis=1)
vec1 = item_np[right_items]
vec2 = item_np[feat_right_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
right_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
right_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_right = | pd.DataFrame(right_result,index=feat_right.index,columns=['right_allitem_item_imagesim_max','right_allitem_item_imagesim_sum']) | pandas.DataFrame |
import time
import pandas as pd
import scrapping
def Items(items):
    # initiate the results DataFrame
Results = pd.DataFrame()
for counter in range(len(items['Items'])):
# print(items[counter])
GetItem = {'item':items['Items'][counter],
'link':'https://www.alibaba.com/trade/search?fsb=y&IndexArea=product_en&CatId=&SearchText={0}'.format(items['Items'][counter].replace(' ','+')),
'number':items['Search'][counter]}
# pause for 1 second to avoid web blocking
time.sleep(1)
# scrapping results on a temp DataFrame
temp = scrapping.ItemResults(GetItem)
# append the results to Results DataFrame
Results = pd.concat([Results,temp],ignore_index=True)
        print('Item {0} has been successfully scraped.'.format(items['Items'][counter]))
return Results
def main():
# Stopping variable
check = True
# get csv name and check its validity
while check:
        file = input('Please enter the .csv file name to scrape its item list: ')
try:
items = | pd.read_csv(file) | pandas.read_csv |
# -*- coding: utf-8
"""Test the created constraints against approved constraints.
This file is part of project oemof (github.com/oemof/oemof-thermal).
It's copyrighted by the contributors recorded in the version control
history of the file, available from its original location
oemof-thermal/tests/constraint_tests.py
SPDX-License-Identifier: MIT
"""
import logging
import os
import re
from difflib import unified_diff
import pandas as pd
from oemof.thermal import facades
from oemof.network.network import Node
from oemof.solph import helpers
import oemof.solph as solph
logging.disable(logging.INFO)
def chop_trailing_whitespace(lines):
return [re.sub(r'\s*$', '', line) for line in lines]
def remove(pattern, lines):
if not pattern:
return lines
return re.subn(pattern, "", "\n".join(lines))[0].split("\n")
def normalize_to_positive_results(lines):
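    # LP writers may emit a constraint with either sign convention; flip every term of
    # a constraint whose right-hand side is negative (a line starting with "= -") so
    # that equivalent LP files compare equal line by line.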
negative_result_indices = [
n for n, line in enumerate(lines)
if re.match("^= -", line)]
equation_start_indices = [
[n for n in reversed(range(0, nri))
if re.match('.*:$', lines[n])][0] + 1
for nri in negative_result_indices]
for (start, end) in zip(
equation_start_indices,
negative_result_indices):
for n in range(start, end):
lines[n] = (
'-'
if lines[n] and lines[n][0] == '+'
else '+'
if lines[n]
else lines[n]) + lines[n][1:]
lines[end] = '= ' + lines[end][3:]
return lines
def compare_lp_files(lp_file_1, lp_file_2, ignored=None):
lines_1 = remove(ignored, chop_trailing_whitespace(lp_file_1.readlines()))
lines_2 = remove(ignored, chop_trailing_whitespace(lp_file_2.readlines()))
lines_1 = normalize_to_positive_results(lines_1)
lines_2 = normalize_to_positive_results(lines_2)
if not lines_1 == lines_2:
raise AssertionError(
"Failed matching lp_file_1 with lp_file_2:\n"
+ "\n".join(
unified_diff(
lines_1,
lines_2,
fromfile=os.path.relpath(
lp_file_1.name),
tofile=os.path.basename(
lp_file_2.name),
lineterm=""
)
))
class TestConstraints:
@classmethod
def setup_class(cls):
cls.objective_pattern = re.compile(r'^objective.*(?=s\.t\.)',
re.DOTALL | re.MULTILINE)
cls.date_time_index = pd.date_range('1/1/2012', periods=3, freq='H')
cls.tmpdir = helpers.extend_basic_path('tmp')
logging.info(cls.tmpdir)
@classmethod
def setup(cls):
cls.energysystem = solph.EnergySystem(groupings=solph.GROUPINGS,
timeindex=cls.date_time_index)
Node.registry = cls.energysystem
def get_om(self):
return solph.Model(self.energysystem,
timeindex=self.energysystem.timeindex)
def compare_to_reference_lp(self, ref_filename, my_om=None):
if my_om is None:
om = self.get_om()
else:
om = my_om
tmp_filename = ref_filename.replace('.lp', '') + '_tmp.lp'
new_filepath = os.path.join(self.tmpdir, tmp_filename)
om.write(new_filepath, io_options={'symbolic_solver_labels': True})
ref_filepath = os.path.join(os.path.dirname(__file__), 'lp_files', ref_filename)
with open(new_filepath) as new_file:
with open(ref_filepath) as ref_file:
compare_lp_files(new_file, ref_file)
def test_stratified_thermal_storage_facade(self):
"""Constraint test of a StratifiedThermalStorage without investment.
"""
bus_heat = solph.Bus(label='bus_heat')
facades.StratifiedThermalStorage(
label='thermal_storage',
bus=bus_heat,
diameter=10,
height=30,
temp_h=95,
temp_c=60,
temp_env=10,
u_value=0.5,
min_storage_level=0.975,
max_storage_level=0.025,
capacity=2,
efficiency=1,
marginal_cost=0.0001
)
self.compare_to_reference_lp('stratified_thermal_storage.lp')
def test_stratified_thermal_storage_invest_option_1_facade(self):
"""
Constraint test of a StratifiedThermalStorage with investment.
Ratio between capacity and storage_capacity is fixed.
"""
bus_heat = solph.Bus(label='bus_heat')
facades.StratifiedThermalStorage(
label='thermal_storage',
bus=bus_heat,
diameter=10,
temp_h=95,
temp_c=60,
temp_env=10,
u_value=0.5,
expandable=True,
capacity_cost=0,
storage_capacity_cost=400,
minimum_storage_capacity=1,
invest_relation_input_capacity=1 / 6,
min_storage_level=0.975,
max_storage_level=0.025,
efficiency=1,
marginal_cost=0.0001
)
self.compare_to_reference_lp('stratified_thermal_storage_invest_option_1.lp')
def test_stratified_thermal_storage_invest_option_2_facade(self):
"""
Constraint test of a StratifiedThermalStorage with investment.
Ratio between capacity and storage_capacity is left open.
"""
bus_heat = solph.Bus(label='bus_heat')
facades.StratifiedThermalStorage(
label='thermal_storage',
bus=bus_heat,
diameter=10,
temp_h=95,
temp_c=60,
temp_env=10,
u_value=0.5,
expandable=True,
capacity_cost=50,
storage_capacity_cost=400,
minimum_storage_capacity=1,
min_storage_level=0.975,
max_storage_level=0.025,
efficiency=1,
marginal_cost=0.0001
)
self.compare_to_reference_lp('stratified_thermal_storage_invest_option_2.lp')
def test_csp_collector_facade(self):
"""Constraint test of a csp collector.
"""
bus_heat = solph.Bus(label='bus_heat')
bus_el = solph.Bus(label='bus_el')
d = {
'Datum': [
'01.02.2003 09:00', '01.02.2003 10:00', '01.02.2003 11:00'],
'E_dir_hor': [43.1, 152.7, 76.9],
't_amb': [22.2, 23.2, 24.1]}
input_data = pd.DataFrame(data=d)
input_data['Datum'] = pd.to_datetime(input_data['Datum'])
input_data.set_index('Datum', inplace=True)
input_data.index = input_data.index.tz_localize(tz='Asia/Muscat')
facades.ParabolicTroughCollector(
label='solar_collector',
heat_bus=bus_heat,
electrical_bus=bus_el,
electrical_consumption=0.05,
additional_losses=0.2,
aperture_area=1000,
loss_method='Janotte',
irradiance_method='horizontal',
latitude=23.614328,
longitude=58.545284,
collector_tilt=10,
collector_azimuth=180,
cleanliness=0.9,
a_1=-0.00159,
a_2=0.0000977,
eta_0=0.816,
c_1=0.0622,
c_2=0.00023,
temp_collector_inlet=435,
temp_collector_outlet=500,
temp_amb=input_data['t_amb'],
irradiance=input_data['E_dir_hor'])
self.compare_to_reference_lp('csp_collector.lp')
def test_solar_thermal_collector_facade(self):
"""
Constraint test of a solar thermal collector.
"""
bus_heat = solph.Bus(label='bus_heat')
bus_el = solph.Bus(label='bus_el')
d = {
'Datum': [
'01.02.2003 09:00', '01.02.2003 10:00', '01.02.2003 11:00'],
'global_horizontal_W_m2': [47, 132, 131],
'diffuse_horizontal_W_m2': [37.57155865, 69.72163199, 98.85021832],
'temp_amb': [4, 6, 8]}
input_data = | pd.DataFrame(data=d) | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import math
import collections
import pymongo
import json
import copy
import hashlib
from io import StringIO
import warnings
warnings.filterwarnings("ignore")
home_path = os.getenv("HOME")
desktop_path = f"{home_path}/Desktop"
class UtilsPandas():
def __init__(self):
pass
    # 1. [Data conversion helpers]
# ============================================================================
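    # Example usage (a minimal sketch with hypothetical file names):
    #   utils = UtilsPandas()
    #   df = utils.import_data(in_file_name="sales")          # reads ~/Desktop/sales.csv
    #   utils.output_data(df, out_file_name="sales_clean")    # writes ~/Desktop/sales_clean.csv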
def import_data(
self, in_file_name="in", end_index=None, field=None, is_df=True,
in_file_path=None, encoding="gb18030", index_col=None,
):
"""
            in: a csv file
            out: a DataFrame or mongo-style docs
            function: csv → df / mongo-style docs (controlled by is_df)
            notes: in_file_path takes precedence over in_file_name.
"""
if in_file_path:
df = pd.read_csv(in_file_path, encoding=encoding, engine='python', index_col=index_col)
else:
            print("[INFO]: no 'in_file_path' given, trying to read from the desktop path...")
df = pd.read_csv(desktop_path+"/{0}.csv".format(in_file_name), encoding=encoding, engine='python', index_col=index_col)
if is_df:
return df
        # 1. If a field is given, return only that column as a list
if field:
            field_lst = df[field].values[:end_index]  # this is a np.array
            return list(field_lst)  # return it as a plain list
# 2.返回的是mongo支持的docs
df = df[:end_index]
docs = df.T.to_dict().values()
return docs
def output_data(
self, in_obj, out_file_name="out", ordered_field_lst=None,
out_file_path=None, output=True, index=False, encoding="gb18030", export_excel=False,
):
"""
            in: mongo-style cursor / df
            out: a csv file
            function: 1. mongo/df → csv
                      2. mongo → df (just set output=False)
            in_obj: whether a mongo cursor or a df, it is first converted to a df, which is then written to csv
            tips: for "mongo → df", just set output=False!
            notes: out_file_path takes precedence over out_file_name.
"""
        # 1. If it is a mongo-style cursor, convert it to a df first
if isinstance(in_obj, pymongo.cursor.Cursor):
# total_items = []
# for doc in in_obj:
# # items = {i:str(j).strip() for i, j in zip(list(doc.keys()), list(doc.values()))}
            #     # output follows the field order stored in mongo!
# items = collections.OrderedDict({i:str(j).strip() for i, j in zip(list(doc.keys()), list(doc.values()))})
# total_items.append(items)
# df = pd.DataFrame(total_items)
            df = pd.DataFrame(list(in_obj))  # risky if in_obj holds millions of documents!!
elif isinstance(in_obj, pd.core.frame.DataFrame):
df = in_obj
        # 2. Fix the column order of the output
if ordered_field_lst:
            # if a requested field does not exist in the df, drop it to avoid errors
for field in ordered_field_lst.copy():
if field not in df.columns:
                    print("Field {} is not in the df, dropping it!".format(field))
ordered_field_lst.remove(field)
            df = df[ordered_field_lst]  # apply the requested order
        # 3. If no csv output is needed, return the df directly
if not output:
return df
        # 4. Finally, write the df out as a csv file
try:
if out_file_path:
if not export_excel:
df.to_csv(out_file_path, index=index, encoding=encoding)
else:
df.to_excel(out_file_path, index=index, encoding=encoding)
else:
if not export_excel:
df.to_csv(desktop_path+"/{0}.csv".format(out_file_name), index=index, encoding=encoding)
else:
df.to_excel(desktop_path+"/{0}.xlsx".format(out_file_name), index=index, encoding=encoding)
except Exception as e:
print(e)
            out_file_name = input("Invalid output file name, please enter a new name: ")
df.to_csv(desktop_path+"/{0}.csv".format(out_file_name), index=index, encoding=encoding)
return df
    # Export an excel file
def output_excel(self, df_lst, out_file_name="out", out_file_path=None, sheet_name_lst=None):
from pandas import ExcelWriter
if out_file_path is None:
# 如果没有out_file_path: 默认放在桌面
out_file_path = f"{desktop_path}/{out_file_name}.xlsx"
with ExcelWriter(out_file_path) as writer:
for i, df in enumerate(df_lst):
if sheet_name_lst:
sheet_name = sheet_name_lst[i]
else:
sheet_name = f"sheet_{i}"
df.to_excel(writer, sheet_name, index=False)
writer.save()
def docs_to_df(self, docs, ordered_field_lst=None):
"""
            Convert mongo docs to a df
"""
        df = self.output_data(docs, output=False, ordered_field_lst=ordered_field_lst)
return df
def df_2_mongo(self, df):
        return df.T.to_dict().values()  # i.e. docs
def df_to_docs(self, df):
        docs = df.to_dict("records")  # efficient
return docs
def read_excel(self, in_file_name="in", in_file_path=None, sheet_name=None, need_to_concat=True):
"""
params:
sheet_name:
                Pass None: returns an ordered dict OrderedDict([("<sheet name>", <df object>)])
                        (values must then be looked up by sheet name)
            need_to_concat:
                When "sheet_name" is not given, all sheets are concatenated by default and the combined df is returned
                (when need_to_concat is False, the sheets are not auto-merged; the 'excel dict object' is returned instead)
"""
        # 1. Read the whole excel file first
if in_file_path is not None:
ordered_d = pd.read_excel(in_file_path, sheet_name=None)
elif in_file_path is None:
ordered_d = pd.read_excel(f"{desktop_path}/{in_file_name}.xlsx", sheet_name=None)
        # 2. Return the df of the requested sheet_name
if sheet_name != None:
df = ordered_d.get(sheet_name)
            del ordered_d  # free the memory of the intermediate object
return df
        # 3. Concatenate all sheets and return the combined df
elif need_to_concat == True:
concat_df = pd.concat([sheet for sheet in ordered_d.values()], axis=0, ignore_index=True)
            del ordered_d  # free the memory of the intermediate object
return concat_df
        # 4. Return the excel dict object (sheet name as "key", the corresponding df as "value")
return ordered_d
def sort_df(self, df, ordered_field_lst):
# 1. 如果指定的字段在df中并不存在,则把该字段remove掉.确保不报错
ordered_field_lst_copy = ordered_field_lst.copy()
for field in ordered_field_lst_copy:
if field not in df.columns:
                print("Field {} is not in the df, dropping it!".format(field))
ordered_field_lst.remove(field)
        # 2. Apply the ordered list of fields to keep to the df
        return df[ordered_field_lst]  # apply the requested order
def save_df_to_mongo(sefl, collection_obj, df):
        dict_values = df.T.to_dict().values()  # a special df-derived view? cannot be indexed directly?
try:
collection_obj.insert_many(dict_values)
except Exception as e:
            raise Exception(f"[mongo insert error] e:{e}")
def read_mongo(self, collection_obj, query={}, need_to_show_dict={}, df_name="foo", need_to_convert_date=True):
"""
params:
            need_to_convert_date: whether to convert date columns while reading from mongo
"""
        # do not fetch the "_id" field
need_to_show_dict.update({"_id":0})
# Make a query to the specific DB and Collection
# print(query, need_to_show_dict)
cursor = collection_obj.find(query, need_to_show_dict)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
df.df_name = df_name
if ("crawl_date" in df.columns) and (need_to_convert_date==True):
df["crawl_date"] = pd.to_datetime(df["crawl_date"])
if ("date" in df.columns) and (need_to_convert_date==True):
            df["date"] = pd.to_datetime(df["date"])
        return df