prompt | completion | api
---|---|---|
string (lengths 19 to 1.03M) | string (lengths 4 to 2.12k) | string (lengths 8 to 90)
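Each record below appears to be a Python source snippet (the prompt) that stops just before a masked pandas call; the masked call (completion) and its dotted API name (api) then follow inline, separated by " | ". For example, one record later in this file ends with:

writer = | pd.ExcelWriter(f'{newName}',engine='xlsxwriter') | pandas.ExcelWriter |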
# This file aims to iterate over the portfolios listed in scanner.csv,
# performing the same operations as iteratEternity.py but using the
# input Excel files as instructions to do operations at scale.
# By changing scanner.csv we keep the DATABASE updated.
# Reset the Excel format with the function BacktoBasics, so it can be re-iterated in the future
import pandas as pd, datetime as dt, numpy as np
import smtplib, re, os
import credentials, glob
import base64, shutil
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email import encoders
import trackATM as tracker
from templateReport import * # template html of all content of the email
#from scanner import *
import yfinance as yahoo
file = []
for filename in glob.iglob('/home/lorenzo/Quanvas/DATABASE/*'):
file.append(filename)
csv = pd.DataFrame(file,columns=['Path'])
# create a common dataframe with all tickers, to avoid
# downloading tickers separately for each client
lista = []
for i in range(len(csv)):
index = pd.read_excel(csv.Path.values[i])
index = index.iloc[:,0].to_list()
lista += index
lista = list(dict.fromkeys(lista))
data = yahoo.download(lista,period="252d",interval="60m")["Adj Close"].fillna(method="ffill")
clients = pd.read_csv('/home/lorenzo/Quanvas/scanner.csv')
hoy = dt.date.today().strftime('%d-%m-%Y')
for i in range(len(clients)):
if clients.Status.values[i] == 0:
pass
elif clients.Status.values[i] == 1:
# Update values of the portfolio
cartera = pd.read_excel(csv.Path.values[i])
previous = cartera.copy()
path = str(clients['Path'][i])
portfolio = pd.DataFrame(index=cartera.iloc[:,0])
info = data.copy()
update = []
for j in range(len(portfolio)):
update.append(info[f'{portfolio.index.values[j]}'].values[-1])
portfolio = pd.DataFrame(index=cartera.iloc[:,0]) # rewrite
portfolio['nominal'] = cartera['nominal'].values
portfolio['pricePaid'] = cartera['price'].values
portfolio['weights'] = (portfolio['nominal'] * portfolio['pricePaid']) / sum(portfolio['nominal'] * portfolio['pricePaid'])
portfolio['notionalStart'] = sum(portfolio['nominal'] * portfolio['pricePaid'])
portfolio['oldLiquidity'] = cartera['liquid'].values
stocks = list(portfolio.index)
portfolio['priceToday'] = update
portfolio['notionalToday'] = sum(portfolio['priceToday'] * portfolio['nominal'])
portfolio['PnLpercent'] = portfolio['notionalToday'] / portfolio['notionalStart']
portfolio['PnLpercentEach'] = portfolio['priceToday'] / portfolio['pricePaid']
        # For the new nominal we add the result obtained plus the remaining liquidity to reinvest, giving our total available
portfolio['nominalNew'] = (portfolio['weights'] * (portfolio['notionalToday'] + portfolio['oldLiquidity']) // portfolio['priceToday']) # nuevo nominal
portfolio['adjust'] = portfolio['nominalNew'] - portfolio['nominal'] # ajuste nominal
portfolio['percentReb'] = (portfolio['nominalNew'] * portfolio['priceToday']) / sum(portfolio['nominalNew'] * portfolio['priceToday'])
        # Linking columns to connect the previous month with the next one, already assembled
portfolio['notionalRebalance'] = sum(portfolio['nominalNew'] * portfolio['priceToday'])
portfolio['liquidityToReinvest'] = ((portfolio['notionalToday'] + portfolio['oldLiquidity']) - portfolio['notionalRebalance'])
capital = int(portfolio.notionalToday.values[0] + portfolio.liquidityToReinvest.values[0])
basics = portfolio.copy()
basics = tracker.BacktoBasics(basics)
folder = os.makedirs('Oldportfolios',exist_ok=True)
name = path
older = path.replace('./DATABASE/','./Oldportfolios/')
shutil.move(f'{name}',f'{older}')
newName = (path.split()[:-1])
newName[-2] = str(capital)
newName = ' '.join(newName) + ' ' + str(dt.date.today()) + '.xlsx'
writer = pd.ExcelWriter(f'{newName}',engine='xlsxwriter')
basics.to_excel(writer,sheet_name=f'Updated {dt.date.today()}')
portfolio.to_excel(writer,sheet_name='Update Done')
previous.to_excel(writer,sheet_name='Previous Composition')
writer.save()
clients.TimeStamp.values[i] = dt.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
        # Reset Status to 0 as there are no changes pending
clients.Status.values[i] = 0
elif clients.Status.values[i] == 2:
        # Change the capital amount of the investment, positive or negative, satisfying the original weights
cartera = pd.read_excel(csv.Path.values[i])
previous = cartera.copy()
path = str(clients['Path'][i])
portfolio = pd.DataFrame(index=cartera.iloc[:,0])
info = data.copy()
update = []
for j in range(len(portfolio)):
update.append(info[f'{portfolio.index.values[j]}'].values[-1])
portfolio = pd.DataFrame(index=cartera.iloc[:,0]) # rewrite
portfolio['nominal'] = cartera['nominal'].values
portfolio['pricePaid'] = cartera['price'].values
portfolio['weights'] = (portfolio['nominal'] * portfolio['pricePaid']) / sum(portfolio['nominal'] * portfolio['pricePaid'])
portfolio['notionalStart'] = sum(portfolio['nominal'] * portfolio['pricePaid'])
portfolio['oldLiquidity'] = cartera['liquid'].values
portfolio['priceToday'] = update
portfolio['notionalToday'] = sum(portfolio['priceToday'] * portfolio['nominal'])
portfolio['PnLpercent'] = portfolio['notionalToday'] / portfolio['notionalStart']
portfolio['PnLpercentEach'] = portfolio['priceToday'] / portfolio['pricePaid']
portfolio['DepositOrWithdraw'] = float(clients.Change.values[i])
portfolio['nominalNew'] = (portfolio['weights'] * ((portfolio['notionalToday'] + portfolio['oldLiquidity']) + portfolio['DepositOrWithdraw']) // portfolio['priceToday']) # nuevo nominal
portfolio['adjust'] = portfolio['nominalNew'] - portfolio['nominal'] # ajuste nominal
portfolio['percentReb'] = (portfolio['nominalNew'] * portfolio['priceToday']) / sum(portfolio['nominalNew'] * portfolio['priceToday'])
portfolio['notionalRebalance'] = sum(portfolio['nominalNew'] * portfolio['priceToday'])
portfolio['liquidityToReinvest'] = ((portfolio['notionalToday'] +portfolio['oldLiquidity']))
capital = int(portfolio.notionalToday.values[0] + portfolio.liquidityToReinvest.values[0])
basics = portfolio.copy()
basics = tracker.BacktoBasics(basics)
folder = os.makedirs('Oldportfolios',exist_ok=True)
name = path
older = path.replace('./DATABASE/','./Oldportfolios/')
shutil.move(f'{name}',f'{older}')
newName = (path.split()[:-1])
newName[-2] = str(capital)
newName = ' '.join(newName) + ' ' + str(dt.date.today()) + '.xlsx'
writer = pd.ExcelWriter(f'{newName}',engine='xlsxwriter')
basics.to_excel(writer,sheet_name=f"Changed {dt.date.today()}")
portfolio.to_excel(writer,sheet_name='Operation Change')
previous.to_excel(writer,sheet_name='Previous Composition')
writer.save()
clients.TimeStamp.values[i] = dt.datetime.today().strftime('%Y-%m-%d %H:%M:%S')
        # Reset Status to 0 as there are no changes pending
clients.Status.values[i] = 0
clients.Change.values[i] = 0
elif clients.Status.values[i] == 3:
# Update risk levels by resetting Component-Value-at-Risk
        # The whole process gathers the Component-Value-at-Risk and applies it to current prices
cartera = pd.read_excel(csv.Path.values[i])
previous = cartera.copy()
path = str(clients['Path'][i])
portfolio = pd.DataFrame(index=cartera.iloc[:,0])
info = data.copy()
update = pd.DataFrame(info[f'{portfolio.index[0]}'].values,columns=[f'{portfolio.index.values[0]}'],index=info.index)
for j in range(1, len(portfolio)):
update[f'{portfolio.index.values[j]}'] = info[f'{portfolio.index[j]}'].values
returns = update.pct_change()
correlation = returns.corr() # correlation
covariance = returns.cov() # covariance
instruments = pd.DataFrame(index= update.columns)
sample = np.random.random_sample(size=(len(update.columns),1)) + (1.0 / len(data.columns))
sample /= np.sum(sample)
        instruments['weights'] = sample # ensure the allocation sums to 1
        instruments['deltas'] = (instruments.weights * correlation).sum() # deltas as elasticity of the assets
        instruments['Stdev'] = returns.std()
        instruments['stress'] = (instruments.deltas * instruments.Stdev) * 3 # stress applied at 3 standard deviations
instruments['portfolio_stress'] = instruments.stress.sum() # the stress of the portfolio
risk = pd.DataFrame(index=update.columns)
risk['numerator'] = (instruments.deltas.multiply(covariance)).sum()
risk['denominator'] = update.pct_change().std() * (-2.365)
risk['GradVaR'] = -risk.numerator / risk.denominator
risk['CVaRj'] = risk.GradVaR * instruments.deltas # Component VaR of the Risk Factors j
risk['thetai'] = (risk.CVaRj * correlation).sum() # Theta i of the instruments
risk['CVaRi'] = risk.thetai * (1/len(update.columns)) # Component VaR of the Instruments i
risk['totalCVaRi'] = risk.CVaRi.sum() #total CVaR of the portfolio
risk['CVaRattribution'] = risk.CVaRi / risk.totalCVaRi # risk allocation by instrument in the portfolio
riskadj = pd.DataFrame(index=update.columns)
riskadj['base'] = instruments['weights'].values
riskadj['CVaRattribution'] = risk.CVaRattribution.sort_values(axis=0,ascending=False)
riskadj['new'] = cartera['weights'].values # Choosing the option with the highest return
riskadj['condition'] = (riskadj.base / riskadj.CVaRattribution)
riskadj['newrisk'] = (riskadj.new / riskadj.CVaRattribution)
riskadj['differences'] = (riskadj.newrisk - riskadj.condition) # apply this result as a percentage to multiply new weights
        riskadj['adjustments'] = (riskadj.newrisk - riskadj.condition) / riskadj.condition # ALARM: if it is negative, add the difference;
        # if it is positive, subtract it - the total must come to 0
riskadj['suggested'] = riskadj.new * (1 + riskadj.adjustments)
riskadj['tototal'] = riskadj.suggested.sum()
riskadj['MinCVaR'] = riskadj.suggested / riskadj.tototal
riskadj[riskadj.MinCVaR>= 0.12] = 0.12
riskadj['MinCVaR'] = riskadj['MinCVaR'] / sum(riskadj['MinCVaR'])
portfolio = pd.DataFrame(index=cartera.iloc[:,0]) # rewrite
portfolio['nominal'] = cartera['nominal'].values
portfolio['pricePaid'] = cartera['price'].values
portfolio['weights'] = riskadj.MinCVaR.values
portfolio['notionalStart'] = sum(portfolio['nominal'] * portfolio['pricePaid'])
portfolio['oldLiquidity'] = cartera['liquid'].values
portfolio['priceToday'] = update.tail(1).T.values
portfolio['notionalToday'] = sum(portfolio['priceToday'] * portfolio['nominal'])
portfolio['PnLpercent'] = portfolio['notionalToday'] / portfolio['notionalStart']
portfolio['PnLpercentEach'] = portfolio['priceToday'] / portfolio['pricePaid']
portfolio['nominalNew'] = ((portfolio['weights'] * (portfolio['notionalToday'] + portfolio['oldLiquidity'])) // portfolio['priceToday']) # nuevo nominal
portfolio['adjust'] = portfolio['nominalNew'] - portfolio['nominal'] # ajuste nominal
portfolio['percentReb'] = (portfolio['nominalNew'] * portfolio['priceToday']) / sum(portfolio['nominalNew'] * portfolio['priceToday'])
        # Linking columns to connect the previous month with the next one, already assembled
portfolio['notionalRebalance'] = sum(portfolio['nominalNew'] * portfolio['priceToday'])
portfolio['liquidityToReinvest'] = (portfolio['notionalToday'] + portfolio['oldLiquidity']) - portfolio['notionalRebalance']
capital = int(portfolio.notionalToday.values[0] + portfolio.liquidityToReinvest.values[0])
basics = portfolio.copy()
basics = tracker.BacktoBasics(basics)
folder = os.makedirs('Oldportfolios',exist_ok=True)
name = path
old = name.replace('./DATABASE/','./Oldportfolios/')
shutil.move(f'{name}',f'{old}')
newName = (path.split()[:-1])
newName[-2] = str(capital)
newName = ' '.join(newName) + ' ' + str(dt.date.today()) + '.xlsx'
writer = | pd.ExcelWriter(f'{newName}',engine='xlsxwriter') | pandas.ExcelWriter |
# coding: utf-8
'''
feature list
- order_number_rev()
- dep_prob()
- aisle_prob()
- dow_prob()
- hour_prob()
- organic_prob()
- latest_order()
- model()
'''
import pymysql
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import csv
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBClassifier
from xgboost import plot_importance
from xgboost import plot_tree
###################### DB connect
db = pymysql.connect(host="", port=3306, user="", passwd="",db="")
## A function that tells, for the products a member buys, whether the member has reordered them and how many times they were bought.
def latest_order():
    ## Load from the table that holds the order information
SQL = "SELECT order_id, user_id, order_number FROM orders"
orders_df = pd.read_sql(SQL, db)
    ## Load from the member's purchase history
SQL = "SELECT * FROM order_products__prior"
prior_df = pd.read_sql(SQL, db)
# merge
prior_df = pd.merge(prior_df, orders_df, how="inner", on="order_id")
    # Most recent purchase record - find the largest order_number, then the list of recently purchased products
prior_grouped_df = prior_df.groupby("user_id")["order_number"].aggregate("max").reset_index()
prior_df_latest = pd.merge(prior_df, prior_grouped_df, how="inner", on=["user_id", "order_number"])
    # Rename column: reordered >> reordered_latest
prior_df_latest = prior_df_latest[["user_id", "product_id", "reordered"]]
prior_df_latest.columns = ["user_id", "product_id", "reordered_latest"]
    # For each product a user bought: how many times it was purchased and how many times it was reordered
prior_df = prior_df.groupby(["user_id","product_id"])["reordered"].aggregate(["count", "sum"]).reset_index()
    # Rename the columns!
prior_df.columns = ["user_id", "product_id", "reordered_count", "reordered_sum"]
# merge prior_df & latest df
latest_order = pd.merge(prior_df, prior_df_latest, how="left", on=["user_id","product_id"])
    # Will be merged later on user_id!!
return latest_order
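# A minimal usage sketch (illustrative only; `base_df` is an assumed frame of
# user_id / product_id pairs and is not defined in this script):
#
#     base_df = pd.DataFrame({"user_id": [1], "product_id": [196]})
#     features = pd.merge(base_df, latest_order(), how="left",
#                         on=["user_id", "product_id"])
#
# The merge attaches reordered_count, reordered_sum and reordered_latest to
# each (user, product) pair.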
### Join with the base frame on product and user id
def order_ratio_bychance():
    ## Load from the table that holds the order information
SQL = "SELECT order_id, user_id, order_number FROM orders"
orders_df = pd.read_sql(SQL, db)
    ## Load from the member's purchase history
SQL = "SELECT order_id, product_id FROM order_products__prior"
prior_df = pd.read_sql(SQL, db)
#merge
order_prior = | pd.merge(prior_df, orders_df, how='inner', on=['order_id']) | pandas.merge |
import matplotlib
from matplotlib import collections as mc
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(style='white', palette='Blues')
import numpy as np
import pandas as pd
from collections import namedtuple
from mpl_toolkits.mplot3d import Axes3D
from time import time as t
import os
notebook_path = os.path.dirname(os.path.realpath(__file__))
root_path = os.path.abspath(os.path.join(notebook_path, os.pardir))
EpisodeStats = namedtuple("Stats",["episode_lengths", "episode_rewards"])
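# Illustrative note (not from the original module): plot_episode_stats() below
# expects a filled-in EpisodeStats, e.g.
#   stats = EpisodeStats(episode_lengths=np.zeros(100), episode_rewards=np.zeros(100))
# which it reads via stats.episode_lengths / stats.episode_rewards.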
def plot_cost_to_go_mountain_car(env, estimator, num_tiles=20, show=True):
x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles)
y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles)
X, Y = np.meshgrid(x, y)
#Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
Z = np.apply_along_axis(lambda _: np.max(estimator.predict(_)), 2, np.dstack([X, Y]))
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0, alpha=0.5)
ax.set_xlabel('Position')
ax.set_ylabel('Velocity')
ax.set_zlabel('Value')
#ax.set_title("Mountain \"Cost To Go\" Function")
ax.set_title("Mountain Value Function")
fig.colorbar(surf)
    fig.savefig("{}/data/mc_value_fn_{}.png".format(root_path, t()), dpi=300, bbox_inches='tight')
if show:
plt.show(fig)
else:
plt.close(fig)
def plot_value_function(V, title="Value Function", show=True):
"""
Plots the value function as a surface plot.
"""
min_x = min(k[0] for k in V.keys())
max_x = max(k[0] for k in V.keys())
min_y = min(k[1] for k in V.keys())
max_y = max(k[1] for k in V.keys())
x_range = np.arange(min_x, max_x + 1)
y_range = np.arange(min_y, max_y + 1)
X, Y = np.meshgrid(x_range, y_range)
# Find value for all (x, y) coordinates
Z_noace = np.apply_along_axis(lambda _: V[(_[0], _[1], False)], 2, np.dstack([X, Y]))
Z_ace = np.apply_along_axis(lambda _: V[(_[0], _[1], True)], 2, np.dstack([X, Y]))
def plot_surface(X, Y, Z, title):
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0)
ax.set_xlabel('Player Sum')
ax.set_ylabel('Dealer Showing')
ax.set_zlabel('Value')
ax.set_title(title)
ax.view_init(ax.elev, -120)
fig.colorbar(surf)
        if show:
            plt.show()
        else:
            plt.close(fig)
plot_surface(X, Y, Z_noace, "{} (No Usable Ace)".format(title))
plot_surface(X, Y, Z_ace, "{} (Usable Ace)".format(title))
def plot_episode_stats(stats, smoothing_window=10, show=True):
# Plot the episode length over time
fig1 = plt.figure(figsize=(10,5))
plt.plot(stats.episode_lengths)
plt.xlabel("Episode")
plt.ylabel("Episode Length")
plt.title("Episode Length over Time")
#fig1.savefig("{}/data/episode_len_{}.png".format(root_path, t()), ppi=300, bbox_inches='tight')
if show:
plt.show(fig1)
else:
plt.close(fig1)
# Plot the episode reward over time
fig2 = plt.figure(figsize=(10,5))
rewards_smoothed = | pd.Series(stats.episode_rewards) | pandas.Series |
"""
An exhaustive list of pandas methods exercising NDFrame.__finalize__.
"""
import operator
import re
import numpy as np
import pytest
import pandas as pd
# TODO:
# * Binary methods (mul, div, etc.)
# * Binary outputs (align, etc.)
# * top-level methods (concat, merge, get_dummies, etc.)
# * window
# * cumulative reductions
not_implemented_mark = pytest.mark.xfail(reason="not implemented")
mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"])
frame_data = ({"A": [1]},)
frame_mi_data = ({"A": [1, 2, 3, 4]}, mi)
# Tuple of
# - Callable: Constructor (Series, DataFrame)
# - Tuple: Constructor args
# - Callable: pass the constructed value with attrs set to this.
_all_methods = [
(
pd.Series,
(np.array([0], dtype="float64")),
operator.methodcaller("view", "int64"),
),
(pd.Series, ([0],), operator.methodcaller("take", [])),
(pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),
(pd.Series, ([0],), operator.methodcaller("repeat", 2)),
pytest.param(
(pd.Series, ([0],), operator.methodcaller("reset_index")),
marks=pytest.mark.xfail,
),
(pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),
pytest.param(
(pd.Series, ([0],), operator.methodcaller("to_frame")), marks=pytest.mark.xfail
),
(pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),
(pd.Series, ([0, 0],), operator.methodcaller("duplicated")),
(pd.Series, ([0, 0],), operator.methodcaller("round")),
(pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)),
(pd.Series, ([0, 0],), operator.methodcaller("rename", "name")),
(pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])),
(pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])),
(pd.Series, ([0, 0],), operator.methodcaller("drop", [0])),
(pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)),
(pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})),
(pd.Series, ([0, 0],), operator.methodcaller("shift")),
(pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])),
(pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)),
(pd.Series, ([0, 0],), operator.methodcaller("isna")),
(pd.Series, ([0, 0],), operator.methodcaller("isnull")),
(pd.Series, ([0, 0],), operator.methodcaller("notna")),
(pd.Series, ([0, 0],), operator.methodcaller("notnull")),
(pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))),
# TODO: mul, div, etc.
(
pd.Series,
([0], pd.period_range("2000", periods=1)),
operator.methodcaller("to_timestamp"),
),
(
pd.Series,
([0], pd.date_range("2000", periods=1)),
operator.methodcaller("to_period"),
),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("dot", pd.DataFrame(index=["A"])),
),
marks=pytest.mark.xfail(reason="Implement binary finalize"),
),
(pd.DataFrame, frame_data, operator.methodcaller("transpose")),
(pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))),
(pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")),
(pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")),
(pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")),
(pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)),
(pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])),
(pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])),
(pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])),
(pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})),
(pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)),
(pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")),
(pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")),
(pd.DataFrame, frame_data, operator.methodcaller("reset_index")),
(pd.DataFrame, frame_data, operator.methodcaller("isna")),
(pd.DataFrame, frame_data, operator.methodcaller("isnull")),
(pd.DataFrame, frame_data, operator.methodcaller("notna")),
(pd.DataFrame, frame_data, operator.methodcaller("notnull")),
(pd.DataFrame, frame_data, operator.methodcaller("dropna")),
(pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")),
(pd.DataFrame, frame_data, operator.methodcaller("duplicated")),
(pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")),
(pd.DataFrame, frame_data, operator.methodcaller("sort_index")),
(pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")),
(pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")),
(pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("add", pd.DataFrame(*frame_data)),
),
marks=not_implemented_mark,
),
# TODO: div, mul, etc.
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add),
),
marks=not_implemented_mark,
),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("combine_first", pd.DataFrame(*frame_data)),
),
marks=not_implemented_mark,
),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("update", pd.DataFrame(*frame_data)),
),
marks=not_implemented_mark,
),
(pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")),
(
pd.DataFrame,
({"A": [1], "B": [1]},),
operator.methodcaller("pivot_table", columns="A"),
),
(
pd.DataFrame,
({"A": [1], "B": [1]},),
operator.methodcaller("pivot_table", columns="A", aggfunc=["mean", "sum"]),
),
(pd.DataFrame, frame_data, operator.methodcaller("stack")),
pytest.param(
(pd.DataFrame, frame_data, operator.methodcaller("explode", "A")),
marks=not_implemented_mark,
),
(pd.DataFrame, frame_mi_data, operator.methodcaller("unstack")),
pytest.param(
(
pd.DataFrame,
({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},),
operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]),
),
marks=not_implemented_mark,
),
pytest.param(
(pd.DataFrame, frame_data, operator.methodcaller("applymap", lambda x: x))
),
pytest.param(
(
pd.DataFrame,
frame_data,
operator.methodcaller("append", | pd.DataFrame({"A": [1]}) | pandas.DataFrame |
from PreProcessing.metaPipeline import PipelineMeta
import pandas as pd
import numpy as np
import librosa
class MelSpectrogram(PipelineMeta):
def __init__(self, metaFileName='Meta.csv'):
"""
        Initialize the class: try to load meta.csv containing file-path metadata.
        On failure, fall back to the inherited PipelineMeta() machinery to generate the missing metadata.
input
metaFileName: Name of saved metadata dataframe
"""
try:
self.meta = | pd.read_csv(metaFileName) | pandas.read_csv |
from flask import Flask, render_template, request
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
sns.set_style("whitegrid")
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn import datasets, linear_model
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from pandas import datetime
import h5py
import warnings
warnings.filterwarnings('ignore')
app = Flask(__name__)
@app.route('/')
def home():
return render_template('login.html')
@app.route('/index', methods=['GET','POST'])
def index():
user=request.form['un']
pas=request.form['pw']
cr=pd.read_excel('cred.xlsx')
un=np.asarray(cr['Username']).tolist()
pw=np.asarray(cr['Password']).tolist()
cred = dict(zip(un, pw))
if user in un:
if(cred[user]==pas):
return render_template('index.html')
else:
k=1
return render_template('login.html',k=k)
else:
k=1
return render_template('login.html',k=k)
@app.route('/data_viz')
def data_viz():
return render_template("data_viz.html")
@app.route('/file_upload')
def file_upload():
return render_template("file_upload.html")
@app.route('/upload_printed', methods=['GET','POST'])
def upload_printed():
abc=request.files['printed_doc']
test1=pd.read_csv(abc)
test=test1
train=pd.read_csv('train.csv')
#test=pd.read_csv('test.csv')
store=pd.read_csv('stores.csv')
feature= | pd.read_csv('features.csv') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
"""
EMCEE deconvolution using the fast parameterised model of the 750l radon
detector based on W&Z's 1996 paper
"""
from __future__ import (absolute_import, division,
print_function)
import glob
import datetime
import os
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.stats import poisson, norm, lognorm, cauchy
import emcee
from . import util
from . import fast_detector
from . import theoretical_model as tm
from .deconvolve import FigureManager
import logzero
logger = logzero.logger
lamrn = 2.1001405267111005e-06
lama = 0.0037876895112565318
lamb = 0.00043106167945270227
lamc = 0.00058052527685087548
# somewhat complicated for python 2/3 compatibility (method from SO)
try:
isinstance("", basestring)
def is_string(s):
return isinstance(s, basestring)
except NameError:
def is_string(s):
return isinstance(s, str)
def detector_model_wrapper(timestep, initial_state, external_radon_conc,
internal_airt_history,
parameters, interpolation_mode=1,
return_full_state=False):
"""
TODO:
"""
t = np.arange(0, timestep*len(external_radon_conc), timestep, dtype=np.float)
params = fast_detector.parameter_array_from_dict(parameters)
soln = fast_detector.detector_model(timestep, interpolation_mode,
external_radon_conc, internal_airt_history,
initial_state, params)
df = pd.DataFrame(index=t/60.0, data=soln)
df.columns = 'Nrnd,Nrnd2,Nrn,Fa,Fb,Fc,Acc_counts'.split(',')
eff = parameters['eff']
df['count rate'] = eff*(df.Fa*lama + df.Fc*lamc)
if return_full_state:
#TODO - this is supposed to include the initial values
assert(False)
return df
def detector_model_observed_counts(timestep, initial_state, external_radon_conc,
internal_airt_history,parameters, interpolation_mode=0):
"""just return the observed_counts timeseries"""
params = fast_detector.parameter_array_from_dict(parameters)
soln = fast_detector.detector_model(timestep, interpolation_mode,
external_radon_conc, internal_airt_history,
initial_state, params)
return np.diff(soln[:,-1])
def calc_detector_efficiency(parameters):
"""
Compute steady-state counting efficiency (counts per Bq/m3 of radon)
"""
Y0 = fast_detector.calc_steady_state(Nrn = 1.0/lamrn, Q=parameters['Q'],
rs=parameters['rs'],
lamp=parameters['lamp'],
V_tank=parameters['V_tank'],
recoil_prob=parameters['recoil_prob'],
eff=parameters['eff'])
counts_per_second = Y0[-1]
steady_state_efficiency = counts_per_second / 1.0
return steady_state_efficiency
def gen_initial_guess(observed_counts, one_sided_prf, reg='tv'):
"""
    An initial guess based on the RL deconvolution.
    Use emcee.utils.sample_ball to generate perturbed guesses for each walker.
"""
N = len(observed_counts)
M = len(one_sided_prf)
symmetric_prf = np.r_[np.zeros(M-1), one_sided_prf]
Ndim = len(observed_counts) + M - 1
# pad first to avoid end effects
pad0 = np.ones(M)*observed_counts[0]
pad1 = np.ones(M)*observed_counts[-1]
observed_counts_padded = np.r_[pad0, observed_counts, pad1]
# fill NaN values by interpolation
observed_counts_padded = pd.Series(observed_counts_padded).interpolate().values
initial_guess = util.deconvlucy1d(observed_counts_padded, symmetric_prf,
iterations=1000, reg=reg)
# exclude padding from return value
initial_guess = initial_guess[M-1:M+N-1]
return initial_guess
##### utility functions for fit_parameters_to_obs
def unpack_parameters(p, model_parameters):
"""
    unpack parameters from the packed vector and return a dict
"""
nhyper = model_parameters['nhyper']
nstate = model_parameters['nstate']
variable_parameter_names = model_parameters['variable_parameter_names']
Y0 = p[:nstate]
parameters = {'Y0':Y0}
#variable_parameter_names = 'Q_external', 'Q', 'rs', 'lamp', 't_delay'
#nhyper = len(variable_parameter_names)
variable_parameters = p[nstate:nhyper+nstate]
radon_concentration_timeseries = p[nhyper+nstate:]
parameters.update( zip(variable_parameter_names, variable_parameters) )
return parameters, Y0, variable_parameters, radon_concentration_timeseries
def pack_parameters(Y0, variable_parameters, radon_concentration_timeseries=[]):
return np.r_[Y0, variable_parameters, radon_concentration_timeseries]
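# Layout of the packed parameter vector p, as implied by pack/unpack above:
#   p = [ Y0 (nstate detector-state values)
#         | variable_parameters (nhyper response-function hyper-parameters)
#         | radon_concentration_timeseries (one value per observation time;
#           empty when the radon series is known) ]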
def detector_model_specialised(p, parameters):
"""
Detector model, specialised for use with emcee
"""
(varying_parameters, Y0, variable_parameters,
radon_concentration_timeseries) = unpack_parameters(p, parameters)
parameters.update(varying_parameters)
# link recoil probability to screen efficiency
parameters['recoil_prob'] = 0.5*(1.0-parameters['rs'])
N = len(radon_concentration_timeseries)
if N==0:
if 'transform_radon_timeseries' in parameters:
assert not parameters['transform_radon_timeseries']
# this means that radon_conc_is_known
radon_concentration_timeseries = parameters['radon_conc']
N = len(radon_concentration_timeseries)
timestep = parameters['tres']
internal_airt_history = parameters['internal_airt_history']
if 'transform_radon_timeseries' in parameters and \
parameters['transform_radon_timeseries']:
radon_concentration_timeseries = \
fast_detector.inverse_transform_radon_concs(radon_concentration_timeseries)
#print(external_radon_conc[0:4])
cr = detector_model_observed_counts(timestep, parameters['Y0'],
radon_concentration_timeseries,
internal_airt_history,
parameters,
interpolation_mode=
parameters['interpolation_mode'])
detector_count_rate = cr
if 'background_count_rate' in parameters:
detector_count_rate += parameters['background_count_rate'] * parameters['tres']
return detector_count_rate
def poisson_pmf_for_testing(population_mean, obs_count):
"""
Implementation is only sensible for very small inputs (max 15 or so)
http://en.wikipedia.org/wiki/Poisson_distribution
$\!f(k; \lambda)= \Pr(X{=}k)= \frac{\lambda^k e^{-\lambda}}{k!}$
from scipy.stats import poisson
obs_count = np.arange(1, 10)
pmf = [poisson_pmf_for_testing(5, itm) for itm in obs_count]
plt.plot(obs_count, pmf)
plt.plot(obs_count, poisson.pmf(obs_count, 5))
# check scipy version works with mu=100
obs_count = np.arange(50, 151)
plt.plot(obs_count, poisson.pmf(obs_count, 100))
"""
k = obs_count
lam = population_mean
pmf = lam**k * np.exp(-lam) / np.math.factorial(k)
return pmf
def lnlike(p, parameters):
observed_counts = parameters['observed_counts']
Nobs = len(observed_counts)
detector_count_rate = detector_model_specialised(p, parameters)
if not len(detector_count_rate) == Nobs-1:
logger.error(f"detector counts does not equal Nobs: {len(detector_count_rate)}, {Nobs}")
raise ValueError()
#scale counts so that total number of counts is preserved (?)
# detector_count_rate
lp = poisson.logpmf(observed_counts[1:], detector_count_rate)
lp = np.nansum(lp)
#f, ax = plt.subplots()
#ax.plot(observed_counts)
#ax.plot(detector_count_rate)
#plt.show()
return lp
def lnprior_hyperparameters(p, parameters):
"""
Prior constraints on hyper-parameters
"""
variable_parameters_mu_prior = parameters['variable_parameters_mu_prior']
variable_parameters_sigma_prior = parameters['variable_parameters_sigma_prior']
ub = parameters['variable_parameter_upper_bounds']
lb = parameters['variable_parameter_lower_bounds']
hp_names = parameters['variable_parameter_names']
## debugging check
#print(parameters['variable_parameter_names'])
#print('mu:')
#print(variable_parameters_mu_prior)
#print('sigma:')
#print(variable_parameters_sigma_prior)
#print('sigma/mu:')
#print(variable_parameters_sigma_prior/variable_parameters_mu_prior)
(varying_parameters, Y0, variable_parameters,
radon_concentration_timeseries) = unpack_parameters(p, parameters)
if not np.alltrue(variable_parameters <= ub):
exidx = variable_parameters > ub
#print(np.array(parameters['variable_parameter_names'])[exidx],
# ub[exidx],
# variable_parameters[exidx])
#print('parameter upper bound exceeded.')
lp = -np.inf
elif not np.alltrue(variable_parameters >= lb):
#print('parameter lower bound exceeded.')
lp = -np.inf
else:
# assume that all priors are normally-distributed
lp = norm.logpdf(variable_parameters, variable_parameters_mu_prior,
variable_parameters_sigma_prior)
# lamp is log-normally distributed, so take the log and then proceed
# as if it's normally-distributed
if 'lamp' in hp_names:
idx_lamp = hp_names.index('lamp')
#print('lamp in hp_names; val, mu, sigma:')
#print(variable_parameters[idx_lamp],variable_parameters_mu_prior[idx_lamp],variable_parameters_sigma_prior[idx_lamp])
lp[idx_lamp] = norm.logpdf(np.log(variable_parameters[idx_lamp]),
np.log(variable_parameters_mu_prior[idx_lamp]),
np.log(variable_parameters_sigma_prior[idx_lamp]))
lp = lp.sum()
return lp
def lnprior_Y0(Y0, parameters):
"""
Prior on detector state at t=0
"""
if Y0.min() <= 0.0:
return -np.inf
Y0_mu_prior = parameters['Y0_mu_prior']
# note - for parameter definitions see
# http://nbviewer.ipython.org/url/xweb.geos.ed.ac.uk/~jsteven5/blog/lognormal_distributions.ipynb
sigma = np.log(2.0)/np.log(Y0_mu_prior) # Standard deviation of log(X) - factor of two
shape = sigma # Scipy's shape parameter
scale = Y0_mu_prior # Scipy's scale parameter = np.exp( mean of log(X) )
ret = lognorm.logpdf(Y0, shape, loc=0, scale=scale)
ret = ret[:-1].sum() # the last state variable (Acc_counts) isn't constrained
return ret
def lnprior_difference(radon_concentration_timeseries, parameters):
"""
log-normal prior on step-by-step changes in radon concentration
"""
if 'transform_radon_timeseries' in parameters and \
parameters['transform_radon_timeseries']:
radon_concentration_timeseries = \
fast_detector.inverse_transform_radon_concs(radon_concentration_timeseries)
p = radon_concentration_timeseries
# Parameters must all be > 0
if p.min() <= 0:
lp = -np.inf
# print('rn conc < 0')
else:
dpdt = np.diff(np.log(p))
# TODO: this was an experiment and should probably be removed
if 'ignore_N_steps' in parameters:
n = parameters['ignore_N_steps']
if n > 0 and n < len(dpdt):
dpdt = np.sort(dpdt)[n:-n]
else:
print('unexpected value for "ignore_N_steps":', n)
print('it should be an integer, 1 or greater')
mu = 0.0 # mean expected change - no change
sigma = np.log(parameters['expected_change_std'])
#sigma = np.log(2) #standard deviation - factor of two change
#sigma = np.log(1.5)
#sigma = np.log(1.05) # much more smoothing
lp = norm.logpdf(dpdt, mu, sigma).sum()
## experiment with Cauchy distribution
# lp = cauchy.logpdf(dpdt, mu, sigma).sum()
return lp
def lnprior_params(p, parameters):
"""
comine priors
"""
(varying_parameters, Y0, variable_parameters, radon_concentration_timeseries
) = unpack_parameters(p, parameters)
lp = 0.0
if len(radon_concentration_timeseries) > 0:
# radon concentrations are not known (deconvolution)
lp += lnprior_difference(radon_concentration_timeseries, parameters)
lp += lnprior_Y0(Y0, parameters)
lp += lnprior_hyperparameters(p, parameters)
if 'total_efficiency' in parameters:
# prior on total efficiency
# 1. put all parameters together in one dictionary
allparams = dict()
allparams.update(parameters)
allparams.update(varying_parameters)
# 2. compute net efficiency
mu = parameters['total_efficiency']
sigma = mu*parameters['total_efficiency_frac_error']
rs=allparams['rs']
Y0 = fast_detector.calc_steady_state(1/lamrn,
Q=allparams['Q'], rs=rs,
lamp=allparams['lamp'],
V_tank=allparams['V_tank'],
recoil_prob=0.5*(1-rs),
eff=allparams['eff'])
total_efficiency = Y0[-1]
if total_efficiency <= 0.0: return -np.inf
lp += norm.logpdf(total_efficiency, mu, sigma)
return lp
def lnprob(p, parameters):
# print(len(p), p/p00)
lp = lnprior_params(p, parameters)
if np.isfinite(lp):
lp += lnlike(p, parameters)
if lp == -np.Inf:
pass
#print('lp: minus Inf.')
#print('lnprior:', lnprior_params(p, parameters))
#print('lnlike:', lnlike(p, parameters))
if np.isnan(lp):
# this should not happen, but let's press on regardless with an
# error message
logger.error(f'NaN during log-probability calculation, set to minus Inf. Parameters are: {p}')
lp = -np.inf
return lp
def fit_parameters_to_obs(t, observed_counts, radon_conc=[],
internal_airt_history=[], parameters=dict(),
variable_parameter_names = (),
variable_parameters_mu_prior = np.array([]),
variable_parameters_sigma_prior = np.array([]),
walkers_per_dim=2, keep_burn_in_samples=False, thin=2,
nthreads=1,
iterations=200,
figure_manager=None):
"""
TODO: doc
"""
# observed counts need to be integers
not_na = np.isfinite(observed_counts)
assert np.alltrue(np.round(observed_counts[not_na])==observed_counts[not_na])
# make a local copy of the paramters dictionary
parameters_ = parameters
# default values for parameters
parameters = {
'transform_radon_timeseries':False
}
parameters.update(parameters_)
nhyper = len(variable_parameter_names)
nstate = fast_detector.N_state
transform_radon_timeseries = parameters['transform_radon_timeseries']
# temporarily set to zero for MAP estimate (will restore before sampling)
parameters['transform_radon_timeseries'] = False
radon_conc_is_known = (len(radon_conc) == len(t))
parameters['observed_counts'] = observed_counts
if radon_conc_is_known:
logger.info("Trying to adjust hyper parameters to match observations")
parameters['radon_conc'] = radon_conc
else:
logger.info("Trying to deconvolve observations")
# default - constant temperature of 20 degC
if len(internal_airt_history) == 0:
logger.warning("Internal air temperature not provided. Assuming constant 20degC")
internal_airt_history = np.zeros(len(t)) + 273.15 + 20.0
parameters.update( dict(variable_parameter_names=variable_parameter_names,
nhyper=nhyper,
nstate=nstate,
variable_parameters_mu_prior=variable_parameters_mu_prior,
variable_parameters_sigma_prior=
variable_parameters_sigma_prior,
internal_airt_history=internal_airt_history))
# Detector state at t=0, prior and initial guess
Y0 = fast_detector.calc_steady_state(Nrn=1.0, Q=parameters['Q'], rs=parameters['rs'],
lamp=parameters['lamp'],
V_tank=parameters['V_tank'],
recoil_prob=parameters['recoil_prob'],
eff=parameters['eff'])
Nrnd,Nrnd2,Nrn,Fa,Fb,Fc, Acc_counts = Y0
expected_counts = parameters['eff']*(Fa*lama + Fc*lamc) * (t[1]-t[0])
scale_factor = observed_counts[0] / expected_counts
Y0 *= scale_factor
Y0_mu_prior = Y0.copy()
parameters.update( dict(Y0_mu_prior=Y0_mu_prior) )
parameters['tres'] = t[1] - t[0]
assert(np.allclose(np.diff(t), parameters['tres']))
rl_radon_timeseries = []
rltv_radon_timeseries = []
if not radon_conc_is_known:
# generate initial guess by (1) working out the PSF, (2) RL deconv.
# determine PSF for these parameters
# unless...
# we think we're fitting a calibration, in which case just guess
# a constant radon concentration
psf_radon_conc = np.zeros(observed_counts.shape)
psf_radon_conc[1] = 1.0/lamrn
params_psf = dict()
params_psf.update(parameters)
params_psf['t_delay'] += parameters['tres']/2.0 # move to middle-of-interval
df = detector_model_wrapper(parameters['tres'], Y0*0.0,
psf_radon_conc,
internal_airt_history=internal_airt_history*0 + 273.15,
parameters=params_psf,
interpolation_mode=parameters['interpolation_mode'])
        # work out when we've seen 99.9% of the total counts
nac = df.Acc_counts/df.Acc_counts.iloc[-1]
idx_90 = int(nac[(nac > 0.999)].index[0])
if idx_90 % 2 == 0:
idx_90 += 1 # must be odd
#TODO: adding that small constant is a hack because RL deconv doesn't work
# when there's a zero in the one-sided prf (apparently)
one_sided_prf = df['count rate'].values[:idx_90] + 0.000048
one_sided_prf = one_sided_prf / one_sided_prf.sum()
if 'background_count_rate' in parameters:
background_counts = parameters['background_count_rate'] * parameters['tres']
else:
background_counts = 0
background_corrected_counts = observed_counts - background_counts
background_corrected_counts[background_corrected_counts<=0] = 1
rl_radon_timeseries = gen_initial_guess(background_corrected_counts, one_sided_prf,
reg=None)
rltv_radon_timeseries = gen_initial_guess(background_corrected_counts,
one_sided_prf)
radon_conc = rltv_radon_timeseries.copy()
logger.debug(f"RLTV should preserve total counts, this should be close to 1: {radon_conc.sum()/observed_counts.sum()}")
# don't accept radon concentration less than 30 mBq/m3 in the guess
mbq30 = 100 # TODO: this is counts, work out a proper threshold 30e-3/tm.lamrn
rnavconc = radon_conc.mean()
radon_conc[radon_conc < mbq30] = mbq30
radon_conc = radon_conc/radon_conc.mean() * rnavconc
# if we're simulating a calibration then begin with a guess of
# constant ambient radon concentration
if 'cal_source_strength' in parameters and \
parameters['cal_source_strength'] > 0:
# counts per counting interval, gets converted to atoms/m3 later
radon_conc = np.zeros(radon_conc.size) + observed_counts[0]
# if there are invalid values contained in the initial guess,
# then also begin with starting guess constant values
missing_values_present = not np.isfinite(observed_counts).all()
if missing_values_present:
# radon_conc here is actually the counts per counting interval
logger.info("Found Inf or NaN values in observed counts, skipping Richardson-Lucy deconvolution")
radon_conc = np.zeros(radon_conc.size) + np.nanmean(observed_counts)
f1, ax = plt.subplots()
ax.plot(one_sided_prf)
ax.set_title('point-response function, used for RLTV initial guess')
f2, ax = plt.subplots()
ax.plot(observed_counts, label='observed counts')
ax.plot(radon_conc, label='RLTV deconvolution')
ax.legend()
if figure_manager is not None:
figure_manager.save_figure(f1, 'emcee-point-response-function')
figure_manager.save_figure(f2, 'emcee-RLTV-deconvolution')
rs = parameters['rs']
Y0eff = fast_detector.calc_steady_state(1/lamrn,
Q=parameters['Q'], rs=rs,
lamp=parameters['lamp'],
V_tank=parameters['V_tank'],
recoil_prob=0.5*(1-rs),
eff=parameters['eff'])
total_efficiency = Y0eff[-1]
if 'total_efficiency' in parameters:
logger.info(f"prescribed total eff:{parameters['total_efficiency']}")
# detector overall efficiency
total_efficiency_correction = parameters['total_efficiency']
else:
total_efficiency_correction = total_efficiency
logger.debug(f"computed total eff: {total_efficiency}")
radon_conc = (radon_conc / parameters['tres'] /
total_efficiency_correction / lamrn )
p_rltv = pack_parameters(Y0_mu_prior, variable_parameters_mu_prior, radon_conc)
modcounts = detector_model_specialised(p_rltv, parameters)
logger.debug(f"Model initial guess should preserve total counts. This should be close to 1: {modcounts.sum()/observed_counts.sum()}")
# force initial guess to preserve total counts
# -- unless there are NaNs in observed counts, in which case we can't
if np.isfinite(observed_counts.sum()):
tc_ratio = modcounts.sum()/observed_counts.sum()
radon_conc /= tc_ratio
p_rltv_adj = pack_parameters(Y0_mu_prior, variable_parameters_mu_prior, radon_conc)
modcounts = detector_model_specialised(p_rltv_adj, parameters)
logger.debug(f"---- after adjustment: {modcounts.sum()/observed_counts.sum()}")
f, ax = plt.subplots()
ax.plot(observed_counts, label='Observed counts')
ax.plot(np.r_[np.nan, modcounts],
label='Modelled counts using RLTV radon timeseries')
ax.legend()
if figure_manager is not None:
pass
# redundant - plot is on another figure
# figure_manager.save_figure(f, 'emcee-modelled-counts-from-RLTV')
assert len(radon_conc) == len(observed_counts)
    # TEST: use scaled counts as the initial guess
# radon_conc = (observed_counts / parameters['tres'] /
# parameters['total_efficiency'] / lamrn )
if radon_conc_is_known:
p00 = pack_parameters(Y0_mu_prior, variable_parameters_mu_prior, [])
# I was seeing a weird error, when re-using p, where the old
# p (defined above) became immutable.
assert(len(p00) == len(Y0_mu_prior) + len(variable_parameters_mu_prior))
else:
p00 = pack_parameters(Y0_mu_prior, variable_parameters_mu_prior, radon_conc)
# trap the same error where p doesn't get updated
assert(p00[-1] == radon_conc[-1])
# a version where the response function isn't allowed to vary
p00_fixed_response = pack_parameters(Y0_mu_prior, [], radon_conc)
p = p00.copy()
#print(p)
#print(parameters)
#print(unpack_parameters(p, parameters)[0])
# we should now be able to compute the liklihood of the initial location p
logger.debug(f"Initial guess P0 log-prob: {lnprob(p, parameters)}")
if not np.isfinite(lnprob(p,parameters)):
logger.error("non-finite P0 for initial guess")
logger.error(f"p-vector: {p}")
logger.error("parameters:")
for k,v in parameters.items():
logger.error("'{}' : {}".format(k, v))
assert np.isfinite(lnprob(p, parameters))
# the function should return -np.inf for negative values in parameters
# (with the exception of the delay time)
#for ii in range(len(p)):
# pp = p.copy()
# pp[ii] *= -1
# print(ii, lnprob(pp, parameters))
#assert(False)
#print("check:", lnprob(np.r_[Y0_mu_prior, p[5:]], parameters))
if radon_conc_is_known or not radon_conc_is_known:
# take the starting location from the MAP
def minus_lnprob(p,parameters):
#print(p[nhyper+nstate:nhyper+nstate+4])
### p = np.r_[p[0: nstate+nhyper], np.exp(p[nstate+nhyper:])]
#if not radon_conc_is_known:
# # special treatment for the radon concentration timeseries
# p_rn = p[nstate+nhyper:]
# radon_conc = inverse_transform_radon_concs(p_rn)
# p = np.r_[p[0: nstate+nhyper], radon_conc]
p = inverse_transform_parameters(p, parameters)
lp = lnprob(p,parameters)
if False:
f, axl = plt.subplots(1, 2, figsize=[4,1.5])
axl[0].plot(parameters['observed_counts'])
axl[0].plot(detector_model_specialised(p, parameters))
axl[0].set_title(lp)
axl[1].plot(radon_conc)
plt.show()
if (p.min() < 0) and False:
logger.error('Parameter less than 0 in minus_lnprob call')
hparams = p[nstate:nstate+nhyper]
logger.error(list(zip(parameters['variable_parameter_names'], hparams)))
logger.error([nstate, nhyper, np.where(p<0)])
if (p.min() < 0) and False:
f, axl = plt.subplots(1, 2, figsize=[4,1.5])
axl[0].plot(parameters['observed_counts'])
axl[0].plot(detector_model_specialised(p, parameters))
axl[0].set_title(lp)
axl[1].plot(radon_conc)
plt.show()
#print(p[nstate:nstate+nhyper], lp)
#print(lp)
if not np.isfinite(lp):
lp = -1e320
return -lp
from scipy.optimize import minimize
method = 'Powell'
# method = 'BFGS' # BFGS is not working
# use log radon conc in x0
### x0 = np.r_[ p[0: nstate+nhyper], np.log(p[nstate+nhyper:])]
#if not radon_conc_is_known:
# # special treatment for the radon concentration timeseries
# radon_conc = p[nstate+nhyper:]
# p_rn = transform_radon_concs(radon_conc)
# p = np.r_[p[0: nstate+nhyper], p_rn]
fixed_response_function = True
if fixed_response_function:
# a version of 'parameters' with the response function set to a fixed value
# (faster/easier MAP optimisation)
parameters_fixed_response_function = {}
parameters_fixed_response_function.update(parameters)
for ii,k in enumerate(parameters['variable_parameter_names']):
parameters_fixed_response_function[k] = parameters['variable_parameters_mu_prior'][ii]
parameters_fixed_response_function['variable_parameter_names'] = []
parameters_fixed_response_function['variable_parameters_mu_prior'] = []
parameters_fixed_response_function['variable_parameters_sigma_prior'] = []
parameters_fixed_response_function['variable_parameter_lower_bounds'] = []
parameters_fixed_response_function['variable_parameter_upper_bounds'] = []
parameters_fixed_response_function['nhyper'] = 0
map_params = parameters_fixed_response_function
map_p = p00_fixed_response.copy()
else:
map_params = parameters
map_p = p
#check that we can call this
x = transform_parameters(map_p, map_params)
logger.debug(f"minus_lnprob at MAP search initial location: {minus_lnprob(x, map_params)}")
with util.timewith(name=method):
x0 = transform_parameters(map_p, map_params)
logger.debug(f'x0: {x0}')
ret = minimize(minus_lnprob, x0=x0, args=(map_params,), method=method,
options=dict(maxiter=200,
maxfev=800000))
#print("MAP P0 log-prob:", lnprob(ret.x, map_params))
pmin = inverse_transform_parameters(ret.x, map_params)
### pmin = np.r_[pmin[0: nstate+nhyper], np.exp(pmin[nstate+nhyper:])]
logger.debug(f"MAP P0 log-prob: {lnprob(pmin, map_params)}")
logger.debug("MAP fitting results:")
ret.pop('direc') # too much output on screen
logger.debug(f"{ret}")
logger.debug(f'(from MAP) pmin: {pmin}')
y1 = detector_model_specialised(pmin, map_params)
y0 = observed_counts[1:]
y_ig = detector_model_specialised(map_p, map_params)
f, ax = plt.subplots()
ax.plot(y0, label='obs')
ax.plot(y1, label='model')
ax.plot(y_ig, label='model_guess_before_MAP')
ax.legend()
ax.set_title((pmin/map_p)[nstate:nstate+map_params['nhyper']])
# logger.debug("Parameters - MAP value / initial guess")
# logger.debug(list(zip(variable_parameter_names, (pmin/p)[nstate:nstate+nhyper])))
if figure_manager is not None:
figure_manager.save_figure(f, 'emcee-model-vs-obs')
map_radon_timeseries = []
## compare parameters with parameters_
#for k in parameters_.keys():
# print(k, parameters_[k], parameters[k])
if not radon_conc_is_known:
f, ax = plt.subplots()
ax.plot(pmin[nstate+map_params['nhyper']:]*lamrn, label='MAP estimate')
ax.plot(p00[nstate+nhyper:]*lamrn, label='RLTV deconvolution')
ax.legend()
ax.set_ylabel('Bq/m3')
if figure_manager is not None:
figure_manager.save_figure(f, 'emcee-MAP')
map_radon_timeseries = pmin[nstate+map_params['nhyper']:].copy()
# this needs to be expanded out like
# p = [state params] [response function params] [radon timeseries]
# but at the moment, pmin doesn't include the response function params
pmin_padded = np.r_[pmin[:nstate], parameters['variable_parameters_mu_prior'], pmin[nstate:]]
p = pmin_padded
#p = pmin.copy()
plt.close('all')
# restore the original value of this parameter
parameters['transform_radon_timeseries'] = transform_radon_timeseries
if not radon_conc_is_known and transform_radon_timeseries:
logger.info('Transforming radon timeseries for emcee sampling')
orig = p[nstate+nhyper:].copy()
fast_detector.transform_radon_concs_inplace(p[nstate+nhyper:])
#f, ax = plt.subplots()
#ax.plot(orig)
#f, ax = plt.subplots()
#ax.plot(p[nstate+nhyper:])
#plt.show()
check = fast_detector.inverse_transform_radon_concs(p[nstate+nhyper:])
if not np.allclose(orig, check):
logger.error("transformed radon concs do not match inverse")
logger.error("(orig,inv) pairs follow")
logger.error( [itm for itm in zip(orig, check)] )
assert False
# Number of walkers needs to be at least 2x number of dimensions
Ndim = len(p)
Nwalker = Ndim * walkers_per_dim
Nwalker = max(Nwalker, 60) # don't run with less than 60 walkers
# number of walkers must be even.
# increment to the next multiple of 4 (for, maybe, easier load balancing)
Nwalker += (4 - Nwalker % 4)
p00 = p.copy()
p0 = emcee.utils.sample_ball(p, std=p/1000.0, size=Nwalker)
# check that the lnprob function still works
logger.info(f"initial lnprob value: {lnprob(p, parameters)}")
logger.info(f"About to start emcee sampler. Parameters are: {parameters}")
from multiprocessing.pool import ThreadPool
if nthreads > 1:
pool = ThreadPool(nthreads)
else:
pool = None
with util.timewith("EMCEE Sampling"):
# sampler
sampler = emcee.EnsembleSampler(Nwalker,Ndim,lnprob,
args=(parameters,),
pool=pool,
a=2.0) #default value of a is 2.0
# burn-in
pos,prob,state = sampler.run_mcmc(p0, iterations,
store=keep_burn_in_samples, thin=thin)
# sample
pos,prob,state = sampler.run_mcmc(pos, iterations, thin=thin)
logger.info(f'EnsembleSampler mean acceptance fraction during sampling: {sampler.acceptance_fraction.mean()}')
#assert sampler.acceptance_fraction.mean() > 0.05
# restore the radon concentrations in sampler.chain to their true values
# (instead of the sequence of coefficients)
Nch, Nit, Np = sampler.chain.shape
if transform_radon_timeseries:
for ii in range(Nch):
for jj in range(Nit):
fast_detector.inverse_transform_radon_concs_inplace(
sampler.chain[ii, jj, nstate+nhyper:])
A = sampler.flatchain
# put the initial guess (MAP estimate) into the chain
A = np.vstack([pmin_padded, A])
mean_est = A.mean(axis=0)
low = np.percentile(A, 10.0, axis=0)
high = np.percentile(A, 90.0, axis=0)
return (sampler, A, mean_est, low, high, parameters, map_radon_timeseries,
rl_radon_timeseries, rltv_radon_timeseries)
def overlapping_chunk_dataframe_iterator(df, chunksize, overlap=0):
"""
A generator which produces an iterator over a dataframe with overlapping chunks
"""
ix0 = 0
ixstart = ix0+overlap
ixend = ixstart+chunksize
ix1 = ixend+overlap
while ix1 <= len(df):
yield df.iloc[ix0:ix1]
ix0+=chunksize
ixstart+=chunksize
ixend+=chunksize
ix1+=chunksize
return
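# Illustrative sketch (not part of the original module): with a 10-row frame,
# chunksize=4 and overlap=1 this yields the iloc slices [0:6] and [4:10];
# chunkwise_apply() below strips `overlap` rows from each end of every chunk result.
#
#     demo = pd.DataFrame({"x": range(10)})
#     for chunk in overlapping_chunk_dataframe_iterator(demo, chunksize=4, overlap=1):
#         print(chunk.index[0], chunk.index[-1])   # -> 0 5, then 4 9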
def chunkwise_apply(df, chunksize, overlap, func, func_args=(), func_kwargs={},
nproc=1):
chunks = list(overlapping_chunk_dataframe_iterator(df, chunksize, overlap))
logger.info('chunkwise_apply...')
logger.info(f' input data length {len(df)}')
logger.info(f' split into {len(chunks)} of length {len(chunks[0])}')
chunk_id_list = list(range(0, len(chunks)))
if nproc == 1:
results = [func(itm, *func_args, **func_kwargs, _chunk_id=chunk_id) for itm,chunk_id in zip(chunks, chunk_id_list)]
else:
# parallel version
from joblib import Parallel, delayed
par = Parallel(n_jobs=nproc, verbose=50)
results = par(delayed(func)(itm, *func_args, _chunk_id=chunk_id, **func_kwargs)
for itm,chunk_id in zip(chunks,chunk_id_list))
# add chunk_id field
for itm, chunk_id in zip(results, chunk_id_list):
if itm is not None:
itm['chunk_id'] = chunk_id
# filter out results which encounted an error during processing
results = [itm for itm in results if itm is not None]
# strip the overlap
if overlap>0:
results = [itm.iloc[overlap:-overlap] for itm in results]
df_ret = pd.concat(results)
return df_ret
def emcee_deconvolve_tm(df, col_name='lld',
model_parameters={},
iterations=500, nthreads=1,
nproc=1,
keep_burn_in_samples=False, thin=1,
walkers_per_dim=3, chunksize=None, overlap=None, short_output=True,
stop_on_error=False,
dict_priors=None,
figdir=None,
_chunk_id=None,
):
"""
TODO: docstring
"""
if chunksize is not None:
assert overlap is not None
chunks = overlapping_chunk_dataframe_iterator(df, chunksize, overlap)
func_kwargs = dict(col_name=col_name,
model_parameters=model_parameters,
iterations=iterations,
nthreads=nthreads,
keep_burn_in_samples=keep_burn_in_samples,
thin=thin,
walkers_per_dim=walkers_per_dim,
figdir=figdir,
chunksize=None, overlap=None, short_output=True)
dfret = chunkwise_apply(df,
chunksize=chunksize,
overlap=overlap,
func=emcee_deconvolve_tm,
func_kwargs=func_kwargs,
nproc=nproc)
return dfret
try:
figure_manager = FigureManager(figdir, _chunk_id)
#
# default parameters for theoretical model of detector
#
rs = 0.76
parameters = dict(
Q = 0.0122,
rs = rs,
lamp = 1/180.0,
eff = 0.15,
Q_external = 40.0 / 60.0 / 1000.0,
V_delay = 200.0 / 1000.0,
V_tank = 750.0 / 1000.0,
recoil_prob = 0.5*(1-rs),
t_delay = 10.0,
total_efficiency=0.128,
total_efficiency_frac_error=0.05,
background_count_rate=1/60.0)
# handle missing values in air temperature timeseries
# --- this parameter doesn't have a strong effect on results, so it's reaonable to interpolate
if df['airt'].isnull().values.any():
airt_saved = df['airt'].values.copy()
df['airt'] = df['airt'].interpolate()
logger.warning(f"Missing values found in air temperature, filling with linear interpolation.\n was: {airt_saved}\n now: {df['airt'].values}")
# the internal airt history should already have been converted to K
internal_airt_history = df['airt'].values
if not internal_airt_history.min() > 200.0:
logger.error("'airt' needs to be in K at the observation time")
raise ValueError("'airt' needs to be in K at the observation time")
if not internal_airt_history.max() < 400:
logger.error("error in 'airt'")
raise ValueError('airt too high',internal_airt_history.max())
# update with prescribed parameters
parameters.update(model_parameters)
# some of the parameters might be specified as DataFrame columns
possible_params_from_dataframe = ['total_efficiency', 'background_count_rate',
'Q_external', 'Q']
params_from_dataframe = []
for param_name in possible_params_from_dataframe:
if is_string(parameters[param_name]):
column_name = parameters[param_name]
parameters[param_name] = df[column_name].mean()
params_from_dataframe.append(param_name)
logger.info(f"{param_name} from data: {parameters[param_name]}")
# detector overall efficiency - check it's close to the prescribed efficiency
# TODO: should eff be adjusted here?
rs = parameters['rs']
Y0eff = fast_detector.calc_steady_state(1/lamrn,
Q=parameters['Q'], rs=rs,
lamp=parameters['lamp'],
V_tank=parameters['V_tank'],
recoil_prob=0.5*(1-rs),
eff=parameters['eff'])
total_efficiency = Y0eff[-1]
logger.debug(f"computed total eff: {total_efficiency} prescribed: {parameters['total_efficiency']}")
if 'total_efficiency' in params_from_dataframe:
logger.debug('Adjusting "eff" parameter so that computed efficiency matches '+
'prescribed')
logger.debug(f" old value of eff: {parameters['eff']}")
parameters['eff'] = parameters['total_efficiency'] / total_efficiency * parameters['eff']
logger.debug(f" new value of eff: {parameters['eff']}")
# priors
if dict_priors is not None:
variable_parameter_names = dict_priors['variable_parameter_names']
variable_parameters_mu_prior = dict_priors[
'variable_parameters_mu_prior']
variable_parameters_sigma_prior = dict_priors[
'variable_parameters_sigma_prior']
parameters['variable_parameter_lower_bounds'] = dict_priors[
'variable_parameter_lower_bounds']
parameters['variable_parameter_upper_bounds'] = dict_priors[
'variable_parameter_upper_bounds']
else:
variable_parameter_names = 'Q_external', 'Q', 'rs', 'lamp', 't_delay', 'eff'
variable_parameters_mu_prior = np.array(
[parameters[k] for k in variable_parameter_names])
variable_parameters_sigma_prior = np.array([parameters['Q_external'] * 0.02,
parameters['Q']*0.2,
0.05,
2.0,
1.,
0.05*parameters['eff']])
parameters['variable_parameter_lower_bounds'] = np.array([0.0, 0.0, 0.0, 0.0, -np.inf, 0.0])
parameters['variable_parameter_upper_bounds'] = np.array([np.inf, np.inf, 2.0, np.inf, np.inf, np.inf])
# extract time in seconds
times = df.index.to_pydatetime()
tzero = times[0]
t = np.array([ (itm-tzero).total_seconds() for itm in times])
tres = t[1] - t[0]
with util.timewith("emcee deconvolution") as timer:
#if chunksize is None:
# print(df[col_name])
fit_ret = fit_parameters_to_obs(t, observed_counts=df[col_name].values,
internal_airt_history = internal_airt_history,
parameters=parameters,
variable_parameter_names = variable_parameter_names,
variable_parameters_mu_prior = variable_parameters_mu_prior,
variable_parameters_sigma_prior = variable_parameters_sigma_prior,
iterations=iterations,
thin=thin,
keep_burn_in_samples=keep_burn_in_samples,
nthreads=nthreads,
figure_manager=figure_manager)
(sampler, A, mean_est, low, high, parameters, map_radon_timeseries,
rl_radon_timeseries, rltv_radon_timeseries) = fit_ret
popt = A.mean(axis=0)
#varying parameters
params_chain = A[:, parameters['nstate']:parameters['nhyper']+parameters['nstate']]
#radon concentration
radon_conc_chain = A[:, parameters['nhyper']+parameters['nstate']:]
# initial state
b = sampler.chain[:, :, parameters['nhyper']+parameters['nstate']:]
#varying parameters as DataFrame
params_chain_df = pd.DataFrame(data = params_chain, columns=parameters['variable_parameter_names'])
#organise outputs into a DataFrame
mean_est = radon_conc_chain.mean(axis=0)
percentiles = np.percentile(radon_conc_chain, [10, 16, 50, 84, 90], axis=0)
# original counts scaled by net sensitivity
scfac = 1.0 / tres / parameters['total_efficiency'] / lamrn
scaled_obs = df[col_name] * scfac
d = {col_name + '_mean': mean_est,
col_name + '_map': map_radon_timeseries,
col_name + '_rl': rl_radon_timeseries * scfac,
col_name + '_rltv': rltv_radon_timeseries * scfac,
col_name + '_p10': percentiles[0],
col_name + '_p16': percentiles[1],
col_name + '_p50': percentiles[2],
col_name + '_p84': percentiles[3],
col_name + '_p90': percentiles[4],
col_name + '_scaled' : scaled_obs,
col_name + '_sampler_acceptance_fraction' : sampler.acceptance_fraction.mean()}
# average-over-sampling-period values (only if interpolation_mode==1)
if parameters['interpolation_mode'] == 1:
tmp = radon_conc_chain.copy()
# N_samples, N_times = tmp.shape
tmp[:, 1:] = (tmp[:, 1:] + tmp[:, :-1]) / 2.0
#tmp[0,:] = np.NaN
mean_est = tmp.mean(axis=0)
percentiles = np.percentile(tmp, [10, 16, 50, 84, 90], axis=0)
d[col_name + 'av_mean'] = mean_est
d[col_name + 'av_p10'] = percentiles[0]
d[col_name + 'av_p16'] = percentiles[1]
d[col_name + 'av_p50'] = percentiles[2]
d[col_name + 'av_p84'] = percentiles[3]
d[col_name + 'av_p90'] = percentiles[4]
# a bunch of samples from the distribution
N_samples = 1000 # TODO: make an argument
Ns,Nt = radon_conc_chain.shape
if N_samples>Ns:
N_samples = Ns
if N_samples > 0:
if parameters['interpolation_mode'] == 1:
instances = tmp
else:
instances = radon_conc_chain
            take_idx = np.floor(np.linspace(0, Ns-1, N_samples)).astype(int)  # np.int was removed from NumPy; use the builtin int
sample_cols = []
for ii in range(N_samples):
k = col_name+'sample_'+str(ii)
sample_cols.append(k)
d[k] = instances[take_idx[ii]]
        dfret = pd.DataFrame(data=d, index=df.index)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from constants import *
import numpy as np
import pandas as pd
import utils
import time
from collections import deque, defaultdict
from scipy.spatial.distance import cosine
from scipy import stats
import math
seed = SEED
cur_stage = CUR_STAGE
mode = cur_mode
#used_recall_source = 'i2i_w02-b2b-i2i2i'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10'
#used_recall_source = 'i2i_w02-b2b-i2i2i-i2i_w10-i2i2b'
used_recall_source = cur_used_recall_source
sum_mode = 'nosum'
used_recall_source = used_recall_source+'-'+sum_mode
print(f'Recall sources used: {used_recall_source}')
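# Note: `used_recall_source` (including the '-nosum' suffix) is also used further below as the
# sub-directory name under `feat_dir` from which cached artefacts such as feat_i2i_seq_*.pkl
# are loaded, so it needs to match the directory written by the recall / i2i-seq steps.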
def feat_item_sum_mean_sim_weight_loc_weight_time_weight_rank_weight(data):
df = data.copy()
df = df[ ['user','item','sim_weight','loc_weight','time_weight','rank_weight','index'] ]
feat = df[ ['index','user','item'] ]
df = df.groupby( ['user','item'] )[ ['sim_weight','loc_weight','time_weight','rank_weight'] ].agg( ['sum','mean'] ).reset_index()
cols = [ f'item_{j}_{i}' for i in ['sim_weight','loc_weight','time_weight','rank_weight'] for j in ['sum','mean'] ]
df.columns = [ 'user','item' ]+ cols
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ cols ]
return feat
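# The feat_* helpers below all appear to follow the same contract: they take the recall candidate
# DataFrame `data` (one row per user/road_item/item candidate, carrying an 'index' column plus
# whatever columns each helper selects) and return a DataFrame containing only the new feature
# columns, index-aligned with the input. A minimal sketch of how such helpers would be consumed
# (`recall_df` is a hypothetical candidate table with the required columns):
#
#   feats = [feat_sum_sim_loc_time_weight(recall_df), feat_itemqa_loc_diff(recall_df)]
#   recall_df = pd.concat([recall_df] + feats, axis=1)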
def feat_sum_sim_loc_time_weight(data):
df = data.copy()
df = df[ ['index','sim_weight','loc_weight','time_weight'] ]
feat = df[ ['index'] ]
feat['sum_sim_loc_time_weight'] = df['sim_weight'] + df['loc_weight'] + df['time_weight']
feat = feat[ ['sum_sim_loc_time_weight'] ]
return feat
def feat_road_item_text_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_text_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_cossim'] ]
return feat
def feat_road_item_text_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text )
return a
else:
return np.nan
feat['road_item_text_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_eulasim'] ]
return feat
def feat_road_item_text_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text - item2_text, ord=1 )
return a
else:
return np.nan
feat['road_item_text_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_mansim'] ]
return feat
def feat_road_item_image_cossim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
c = np.dot( item1_image, item2_image )
a = np.linalg.norm( item1_image )
b = np.linalg.norm( item2_image )
return c/(a*b+(1e-9))
else:
return np.nan
feat['road_item_image_cossim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_cossim'] ]
return feat
def feat_road_item_image_eulasim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_image[k] = v[1]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image )
return a
else:
return np.nan
feat['road_item_image_eulasim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_eulasim'] ]
return feat
def feat_road_item_image_mansim(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_image = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
        item_image[k] = v[1]  # v[1] is the image embedding (v[0] is the text embedding)
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_image ) and ( item2 in item_image ):
item1_image = item_image[item1]
item2_image = item_image[item2]
a = np.linalg.norm( item1_image - item2_image, ord=1 )
return a
else:
return np.nan
feat['road_item_image_mansim'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_image_mansim'] ]
return feat
def feat_i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
i2i_sim_seq = {}
st0 = time.time()
tot = 0
for user, items in user_item_dict.items():
times = user_time_dict[user]
if tot % 500 == 0:
print( f'tot: {len(user_item_dict)}, now: {tot}' )
tot += 1
for loc1, item in enumerate(items):
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
if (item,relate_item) not in new_keys:
continue
t1 = times[loc1]
t2 = times[loc2]
i2i_sim_seq.setdefault((item,relate_item), [])
i2i_sim_seq[ (item,relate_item) ].append( (loc1, loc2, t1, t2, len(items) ) )
st1 = time.time()
print(st1-st0)
return i2i_sim_seq
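# feat_i2i_seq returns a dict keyed by (road_item, item) pairs that actually occur in the
# candidate set; each value is a list of co-occurrence records
# (loc1, loc2, t1, t2, session_length), one per user session in which both items appear.
# The feat_i2i_* functions further below re-weight these cached records in different ways.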
def feat_i2i2i_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:50]:
res[i[0]]=i[1]
sim_item_p2[key] = res
i2i2i_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if item2 == item1:
continue
for item3 in sim_item_p2[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2i_sim_seq.setdefault((item1,item3), [])
i2i2i_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], sim_item_p2[item2][item3],
sim_item_p1[item1][item2], sim_item_p1[item2][item3] ) )
return i2i2i_sim_seq
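# feat_i2i2i_seq returns a dict keyed by (item1, item3) candidate pairs; each value is a list of
# (bridge_item2, sim_p2(item1,item2), sim_p2(item2,item3), sim_p1(item1,item2), sim_p1(item2,item3))
# tuples, i.e. two-hop similarity evidence routed through an intermediate item.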
def feat_i2i2b_seq(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
vals = feat[ ['road_item', 'item'] ].values
new_keys = set()
for val in vals:
new_keys.add( (val[0], val[1]) )
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
all_pair_num = 0
sim_item_p2 = {}
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item_p2.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
all_pair_num += 1
t1 = times[loc1]
t2 = times[loc2]
sim_item_p2[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item_p2[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
sim_item_p1 = {}
for i, related_items in sim_item_p2.items():
sim_item_p1[i] = {}
for j, cij in related_items.items():
sim_item_p1[i][j] = cij / (item_cnt[i] * item_cnt[j])
sim_item_p2[i][j] = cij / ((item_cnt[i] * item_cnt[j]) ** 0.2)
print('all_pair_num',all_pair_num)
for key in sim_item_p2.keys():
t = sim_item_p2[key]
t = sorted(t.items(), key=lambda d:d[1], reverse = True )
res = {}
for i in t[0:100]:
res[i[0]]=i[1]
sim_item_p2[key] = res
blend_sim = utils.load_sim(item_blend_sim_path)
blend_score = {}
for item in blend_sim:
i = item[0]
blend_score.setdefault(i,{})
for j,cij in item[1][:100]:
blend_score[i][j] = cij
i2i2b_sim_seq = {}
t1 = time.time()
for idx,item1 in enumerate( sim_item_p2.keys() ):
if idx%10000==0:
t2 = time.time()
print( f'use time {t2-t1} for 10000, now {idx} , tot {len(sim_item_p2.keys())}' )
t1 = t2
for item2 in sim_item_p2[item1].keys():
if (item2 == item1) or (item2 not in blend_score.keys()):
continue
for item3 in blend_score[item2].keys():
if item3 == item1 or item3 == item2:
continue
if (item1,item3) not in new_keys:
continue
i2i2b_sim_seq.setdefault((item1,item3), [])
i2i2b_sim_seq[ (item1,item3) ].append( ( item2, sim_item_p2[item1][item2], blend_score[item2][item3],
sim_item_p1[item1][item2], blend_score[item2][item3] ) )
return i2i2b_sim_seq
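# feat_i2i2b_seq is the same two-hop construction as feat_i2i2i_seq, except the second hop uses
# the precomputed blend similarity (item_blend_sim_path) instead of the co-click similarity.
# Note that blend_score[item2][item3] is stored in both the 3rd and 5th tuple fields, presumably
# because the blend similarity has no p1/p2 split.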
def feat_i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
for key in new_keys:
if np.isnan( result[key] ):
continue
result[key] = result[key] / ((item_cnt[key[0]] * item_cnt[key[1]]) ** 0.2)
print('Finished getting result')
feat['i2i_sim'] = feat['new_keys'].map(result)
#import pdb
#pdb.set_trace()
#i2i_seq_feat = pd.concat( [feat,i2i_seq_feat], axis=1 )
#i2i_seq_feat['itemAB'] = i2i_seq_feat['road_item'].astype('str') + '-' + i2i_seq_feat['item'].astype('str')
feat = feat[ ['i2i_sim'] ]
return feat
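# The feat_i2i_sim_* / feat_i2i_cijs_* variants below all iterate over the cached i2i_sim_seq
# records and differ only in how each record is weighted: a location decay loc_base**loc_diff
# and/or a time decay (1 - |t1 - t2| * 100), each floored at 0.2, optionally signed by click
# direction, and normalised by log(1 + session_length) or by the number of records.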
def feat_i2i_sim_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += loc_weight
feat['i2i_sim_abs_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_abs_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
loc_diff = loc1-loc2
loc_weight = (loc_base**loc_diff)
if abs(loc_weight) <= 0.2:
if loc_weight > 0:
loc_weight = 0.2
else:
loc_weight = -0.2
result[key] += loc_weight
feat['i2i_sim_loc_weights_loc_base'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_sim_loc_weights_loc_base'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_sim_abs_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
result[key] += time_weight
feat['i2i_sim_abs_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_sim_time_weights(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - (t1 - t2) * 100)
if abs(time_weight)<=0.2:
if time_weight > 0:
time_weight = 0.2
else:
time_weight = -0.2
result[key] += time_weight
feat['i2i_sim_time_weights'] = feat['new_keys'].map(result)
print('Finished getting result')
cols = [ 'i2i_sim_time_weights' ]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
time_weight = (1 - abs(t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = abs(loc2-loc1)
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
if loc1-loc2>0:
result[key] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
else:
result[key] -= 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)
feat['i2i_cijs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_cijs_mean_abs_loc_weights_loc_base(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
loc_bases = [0.2,0.4,0.6,0.8,1.0]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = 0.0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key] += ( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len) ) / len(records)
feat['i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for loc_base in loc_bases:
cols.append( 'i2i_cijs_mean_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_sum_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] + item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_sum_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_sum_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_i2i_bottom_itemcnt_multi_weight(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
#print('Loading i2i_sim_seq')
#i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
#print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
weights = [0.2,0.4,0.6,0.8,1.0]
for weight in weights:
print(f'Starting {weight}')
result = {}
for key in new_keys:
if (key[0] in item_cnt.keys()) and (key[1] in item_cnt.keys()):
result[key] = ((item_cnt[key[0]] * item_cnt[key[1]]) ** weight)
feat['i2i_bottom_itemcnt_multi_weight_'+str(weight)] = feat['new_keys'].map(result)
print('Finished getting result')
cols = []
for weight in weights:
cols.append( 'i2i_bottom_itemcnt_multi_weight_'+str(weight) )
feat = feat[ cols ]
return feat
def feat_b2b_sim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
blend_sim = utils.load_sim(item_blend_sim_path)
b2b_sim = {}
for item in blend_sim:
i = item[0]
b2b_sim.setdefault(i,{})
for j,cij in item[1][:100]:
b2b_sim[i][j] = cij
vals = feat[ ['road_item','item'] ].values
result = []
for val in vals:
item1 = val[0]
item2 = val[1]
if item1 in b2b_sim.keys():
if item2 in b2b_sim[item1].keys():
result.append( b2b_sim[ item1 ][ item2 ] )
else:
result.append( np.nan )
else:
result.append( np.nan )
feat['b2b_sim'] = result
feat = feat[ ['b2b_sim'] ]
return feat
def feat_itemqa_loc_diff(data):
df = data.copy()
feat = df[ ['index','query_item_loc','road_item_loc'] ]
feat['itemqa_loc_diff'] = feat['road_item_loc'] - feat['query_item_loc']
def func(s):
if s<0:
return -s
return s
feat['abs_itemqa_loc_diff'] = feat['itemqa_loc_diff'].apply(func)
feat = feat[ ['itemqa_loc_diff','abs_itemqa_loc_diff'] ]
return feat
def feat_sim_three_weight(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean'] = feat['com_item_loc_weights_sum'] / feat['com_item_cnt']
feat['com_item_time_weights_mean'] = feat['com_item_time_weights_sum'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum','com_item_time_weights_sum','com_item_record_weights_sum',
'com_item_loc_weights_mean','com_item_time_weights_mean','com_item_record_weights_mean' ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
feat['user_item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
for col in ['i2i_score','blend_score','i2i2i_score']:
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user_item',col+'_sum'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
df = feat[ ['user_item',col,'index'] ]
df = df.groupby('user_item')[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user_item',col+'_mean'] ]
feat = pd.merge( feat, df, on='user_item', how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
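# In the *_new variant below, recall_type is assumed to index the recall sources in the same
# order as `used_recall_source` ('i2i_w02', 'b2b', 'i2i2i', 'i2i_w10', 'i2i2b'); each candidate
# row keeps a score only for the source that actually recalled it, and the others are set to NaN.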
def feat_different_type_road_score_sum_mean_new(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
recall_source_names = ['i2i_w02','b2b','i2i2i','i2i_w10','i2i2b']
recall_source_names = [ i+'_score' for i in recall_source_names ]
for idx,col in enumerate(recall_source_names):
feat[col] = feat['sim_weight']
feat.loc[ feat['recall_type']!=idx, col ] = np.nan
for col in recall_source_names:
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].sum().reset_index()
df[col+'_sum'] = df[col]
df = df[ ['user','item',col+'_sum'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
df = feat[ ['user','item',col,'index'] ]
df = df.groupby( ['user','item'] )[col].mean().reset_index()
df[col+'_mean'] = df[col]
df = df[ ['user','item',col+'_mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat_list = recall_source_names + [ col+'_sum' for col in recall_source_names ] + [ col+'_mean' for col in recall_source_names ]
feat = feat[ feat_list ]
return feat
def feat_sim_base(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
road_item_cnt = np.zeros( num, dtype=float )
t_item_cnt = np.zeros( num, dtype=float )
com_item_cij = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
road_item_cnt[i] = item_cnt[ road_item[i] ]
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_cij[i] = sim_item[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
road_item_cnt[i] = np.nan
com_item_cij[i] = np.nan
t_com_item_cnt[i] = np.nan
if t_item[i] in item_set:
t_item_cnt[i] = item_cnt[ t_item[i] ]
else:
t_item_cnt[i] = np.nan
feat['road_item_cnt'] = road_item_cnt
feat['item_cnt'] = t_item_cnt
feat['com_item_cij'] = com_item_cij
feat['com_item_cnt'] = t_com_item_cnt
feat = feat[ ['road_item_cnt','item_cnt','com_item_cij','com_item_cnt' ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_u2i_abs_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if loc_weight<=0.1:
loc_weight = 0.1
result.append(loc_weight)
feat['u2i_abs_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_abs_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_loc_weights_loc_base(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_loc','road_item_loc'] ].values
loc_bases = [0.1,0.3,0.5,0.7,0.9]
for loc_base in loc_bases:
result = []
for val in vals:
loc1 = val[0]
loc2 = val[1]
if loc2 >= loc1:
loc_diff = loc2-loc1
else:
loc_diff = loc1-loc2-1
loc_weight = loc_base**loc_diff
if abs(loc_weight)<=0.1:
loc_weight = 0.1
if loc2 < loc1:
loc_weight = -loc_weight
result.append(loc_weight)
feat['u2i_loc_weights_loc_base_'+str(loc_base)] = result
cols = []
for loc_base in loc_bases:
cols.append( 'u2i_loc_weights_loc_base_'+str(loc_base) )
feat = feat[ cols ]
return feat
def feat_u2i_abs_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if time_weight<=0.1:
time_weight = 0.1
result.append(time_weight)
feat['u2i_abs_time_weights'] = result
cols = [ 'u2i_abs_time_weights' ]
feat = feat[ cols ]
return feat
def feat_u2i_time_weights(data):
df = data.copy()
feat = df[ ['road_item','item','query_item_loc','query_item_time','road_item_loc','road_item_time'] ]
vals = feat[ ['query_item_time','road_item_time'] ].values
result = []
for val in vals:
t1 = val[0]
t2 = val[1]
time_weight = (1 - abs( t1 - t2 ) * 100)
if abs(time_weight)<=0.1:
time_weight = 0.1
if t1 > t2:
time_weight = -time_weight
result.append(time_weight)
feat['u2i_time_weights'] = result
cols = [ 'u2i_time_weights' ]
feat = feat[ cols ]
return feat
def feat_automl_cate_count(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'road_item','item','road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_automl_user_cate_count(data):
df = data.copy()
feat = df[ ['index','user','road_item','item'] ]
feat['user-road_item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str')
feat['user-item'] = feat['user'].astype('str') + '-' + feat['item'].astype('str')
feat['user-road_item-item'] = feat['user'].astype('str') + '-' + feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cate_list = [ 'user-road_item','user-item','user-road_item-item' ]
cols = []
for cate in cate_list:
feat[cate+'_count'] = feat[ cate ].map( feat[ cate ].value_counts() )
cols.append( cate+'_count' )
feat = feat[ cols ]
return feat
def feat_u2i_road_item_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time']]
feat = feat.groupby(['user','road_item_loc']).first().reset_index()
feat_group = feat.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_group['road_item_time'].diff(1)
feat2 = feat_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_time_diff_history'
feat2.name = 'u2i_road_item_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2],axis=1),how='left',on=['user','road_item_loc'])
cols = [ 'u2i_road_item_time_diff_history', 'u2i_road_item_time_diff_future' ]
feat = feat[ cols ]
return feat
def feat_road_item_text_dot(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
c = np.dot( item1_text, item2_text )
return c
else:
return np.nan
feat['road_item_text_dot'] = df[ ['road_item','item'] ].apply(func, axis=1)
feat = feat[ ['road_item_text_dot'] ]
return feat
def feat_road_item_text_norm2(data):
df = data.copy()
df = df[ ['index','road_item','item'] ]
feat = df[ ['index'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
for k,v in item_feat.items():
item_text[k] = v[0]
def func1(ss):
item1 = ss['road_item']
item2 = ss['item']
if ( item1 in item_text ) and ( item2 in item_text ):
item1_text = item_text[item1]
item2_text = item_text[item2]
a = np.linalg.norm( item1_text )
b = np.linalg.norm( item2_text )
return a*b
else:
return np.nan
def func2(ss):
item1 = ss
if ( item1 in item_text ):
item1_text = item_text[item1]
a = np.linalg.norm( item1_text )
return a
else:
return np.nan
feat['road_item_text_product_norm2'] = df[ ['road_item','item'] ].apply(func1, axis=1)
feat['road_item_text_norm2'] = df['road_item'].apply(func2)
feat['item_text_norm2'] = df['item'].apply(func2)
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
def feat_automl_cate_count_all_1(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
feat[cate1+'_count_'] = feat[cate1].map( feat[cate1].value_counts() )
cols.append( cate1+'_count_' )
        print(f'feat {cate1} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_2(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
name2 = f'{cate1}_{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count_' )
            print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_automl_cate_count_all_3(data):
df = data.copy()
categories = [ 'user','item','road_item','road_item_loc',
'query_item_loc','recall_type']
feat = df[ ['index']+categories ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categories += ['loc_diff']
n = len(categories)
cols = []
for a in range(n):
cate1 = categories[a]
for b in range(a+1,n):
cate2 = categories[b]
for c in range(b+1,n):
cate3 = categories[c]
name3 = f'{cate1}_{cate2}_{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count_'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count_' )
                print(f'feat {feat_tmp.name} done')
feat = feat[ cols ]
return feat
def feat_time_window_cate_count(data):
if mode=='valid':
all_train_data = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
all_train_data = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_with_time = all_train_data[["item_id", "time"]].sort_values(["item_id", "time"])
item2time = item_with_time.groupby("item_id")["time"].agg(list).to_dict()
utils.dump_pickle(item2time, item2time_path.format(mode))
item2times = utils.load_pickle(item2time_path.format(mode))
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
]]
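# NOTE: feat_time_window_cate_count is redefined below; because both definitions share the same
# name, only the second one (which loads the precomputed item2time pickle and adds the
# 0.07/0.1/0.15 windows) is actually in effect at import time.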
def feat_time_window_cate_count(data):
    # Before building this feature, run item2time.py once to generate the item2time pickle.
try:
item2times = utils.load_pickle(item2time_path.format(mode, cur_stage))
    except Exception:
        raise Exception("Run item2time.py first to build the item2time pickle before computing this feature.")
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
feat["item_cnt_around_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.01))
feat["item_cnt_before_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.01))
feat["item_cnt_after_time_0.01"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.01))
feat["item_cnt_around_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.02))
feat["item_cnt_before_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.02))
feat["item_cnt_after_time_0.02"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.02))
feat["item_cnt_around_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.05))
feat["item_cnt_before_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.05))
feat["item_cnt_after_time_0.05"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.05))
feat["item_cnt_around_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.07))
feat["item_cnt_before_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.07))
feat["item_cnt_after_time_0.07"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.07))
feat["item_cnt_around_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.1))
feat["item_cnt_before_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.1))
feat["item_cnt_after_time_0.1"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.1))
feat["item_cnt_around_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="all", delta=0.15))
feat["item_cnt_before_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="left", delta=0.15))
feat["item_cnt_after_time_0.15"] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode="right", delta=0.15))
return feat[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
# Within the recall set, count how many times this item was recalled inside a time window around qtime.
# item2times is built differently here; the rest of the logic is unchanged.
def item_recall_cnt_around_qtime(data):
item2times = data.groupby("item")["time"].agg(list).to_dict()
df = data.copy()
df["item_time"] = df.set_index(["item", "time"]).index
feat = df[["item_time"]]
del df
def find_count_around_time(item_time, mode, delta):
item, t = item_time
if mode == "left":
left = t - delta
right = t
elif mode == "right":
left = t
right = t + delta
else:
left = t - delta
right = t + delta
click_times = item2times[item]
count = 0
for ts in click_times:
if ts < left:
continue
elif ts > right:
break
else:
count += 1
return count
new_cols = []
new_col_name = "item_recall_cnt_{}_time_{}"
for delta in [0.01, 0.02, 0.05, 0.07, 0.1, 0.15]:
print('running delta: ', delta)
for mode in ["all", "left", "right"]:
new_col = new_col_name.format(mode, delta)
new_cols.append(new_col)
feat[new_col] = feat["item_time"].apply(lambda x: find_count_around_time(x, mode=mode, delta=delta))
return feat[new_cols]
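# The feat_automl_* functions below build categorical co-occurrence counts: for each
# combination of keys (e.g. recall_type x item, recall_type x road_item,
# recall_type x "road_item-item" pair) they group the candidate table by those keys,
# take the group size, and merge it back as a count feature.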
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['road_item-item'] = feat['road_item'].astype('str')+ '-' + feat['item'].astype('str')
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type','road_item-item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
feat['road_item-item'] = feat['road_item'].astype('str') + '-' + feat['item'].astype('str')
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item','road_item-item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
print(f'feat {cate1} {cate2} {cate3} done')
feat = feat[ cols ]
return feat
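# Reading of the i2i cij features below: each record in feat_i2i_seq is
# (loc1, loc2, t1, t2, record_len) for one co-occurrence of (road_item, item) in a
# user's history. A minimal sketch of the per-record contribution used throughout
# these functions (the 0.2 floors, the 100x time scaling and loc_base = 0.9 are
# taken from the expressions below; _cij_contribution is only an illustrative name):
# def _cij_contribution(loc1, loc2, t1, t2, record_len, loc_base=0.9):
#     time_weight = max(0.2, 1 - abs(t1 - t2) * 100)
#     loc_weight = max(0.2, loc_base ** (abs(loc1 - loc2) - 1))
#     return loc_weight * time_weight / math.log(1 + record_len)
# This function keeps the top-k contributions ordered by location gap, plus counts
# and mean time gaps of the directly adjacent (|loc1 - loc2| == 1) co-occurrences.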
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_topk_by_loc = {}
result_history_loc_diff1_cnt = {}
result_future_loc_diff1_cnt = {}
result_history_loc_diff1_time_mean = {}
result_future_loc_diff1_time_mean = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
result_history_loc_diff1_cnt[key] = 0.0
result_future_loc_diff1_cnt[key] = 0.0
result_history_loc_diff1_time_mean[key] = 0
result_future_loc_diff1_time_mean[key] = 0
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result_history_loc_diff1_cnt[key] += 1
result_history_loc_diff1_time_mean[key] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result_future_loc_diff1_cnt[key] += 1
result_future_loc_diff1_time_mean[key] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)))
result_history_loc_diff1_time_mean[key] /=(result_history_loc_diff1_cnt[key]+1e-5)
result_future_loc_diff1_time_mean[key] /=(result_future_loc_diff1_cnt[key]+1e-5)
result_one = sorted(result[key],key=lambda x:x[0])
result_one_len = len(result_one)
result_topk_by_loc[key] = [x[1] for x in result_one[:topk]]+[np.nan]*max(0,topk-result_one_len)
feat['history_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_history_loc_diff1_time_mean).fillna(0)
feat['future_loc_diff1_com_item_time_mean'] = feat['new_keys'].map(result_future_loc_diff1_time_mean).fillna(0)
feat['history_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_history_loc_diff1_cnt).fillna(0)
feat['future_loc_diff1_com_item_cnt'] = feat['new_keys'].map(result_future_loc_diff1_cnt).fillna(0)
feat_top = []
for key,value in result_topk_by_loc.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_bases = [0.9]
for loc_base in loc_bases:
print(f'Starting {loc_base}')
result = {}
result_median = {}
result_mean = {}
result_topk = {}
for key in new_keys:
if key not in i2i_sim_seq.keys():
result[key] = np.nan
continue
result[key] = []
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[key].append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result[key],reverse=True)
result_one_len = len(result_one)
result_median[key] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result_mean[key] = sum(result[key])/len(result[key])
result_topk[key] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
feat['i2i_cijs_median'] = feat['new_keys'].map(result_median)
feat['i2i_cijs_mean'] = feat['new_keys'].map(result_mean)
feat_top = []
for key,value in result_topk.items():
feat_top.append([key[0],key[1]]+value)
feat_top = pd.DataFrame(feat_top,columns=['road_item','item']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)])
feat = feat.merge(feat_top,how='left',on=['road_item','item'])
print('Finished getting result')
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
feat = feat[ cols ]
return feat
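# The feat_different_type_road_score_* functions split sim_weight into one score
# column per recall source (the masking below implies recall_type 0 -> i2i_score,
# 1 -> blend_score, 2 -> i2i2i_score) and then aggregate each score column
# (sum / mean) over a grouping key such as item, road_item, loc_diff or
# (item, recall_type).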
def feat_different_type_road_score_sum_mean_by_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#,'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].sum().reset_index()
df[col+'_by_item_sum'] = df[col]
df = df[ ['item',col+'_by_item_sum'] ]
feat = pd.merge( feat, df, on='item', how='left')
df = feat[ ['item',col,'index'] ]
df = df.groupby('item')[col].mean().reset_index()
df[col+'_by_item_mean'] = df[col]
df = df[ ['item',col+'_by_item_mean'] ]
feat = pd.merge( feat, df, on='item', how='left')
feat = feat[[f'{i}_by_item_{j}' for i in cols for j in ['sum','mean']]]
return feat
def feat_different_type_road_score_mean_by_road_item(data):
df = data.copy()
feat = df[ ['user','road_item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score']#'i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['road_item',col,'index'] ]
df = df.groupby('road_item')[col].mean().reset_index()
df[col+'_by_road_item_mean'] = df[col]
df = df[ ['road_item',col+'_by_road_item_mean'] ]
feat = pd.merge( feat, df, on='road_item', how='left')
feat = feat[[f'{i}_by_road_item_mean' for i in cols]]
return feat
def feat_different_type_road_score_mean_by_loc_diff(data):
df = data.copy()
feat = df[ ['user','index','sim_weight','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['loc_diff',col,'index'] ]
df = df.groupby('loc_diff')[col].mean().reset_index()
df[col+'_by_loc_diff_mean'] = df[col]
df = df[ ['loc_diff',col+'_by_loc_diff_mean'] ]
feat = pd.merge( feat, df, on='loc_diff', how='left')
feat = feat[[f'{i}_by_loc_diff_mean' for i in cols]]
return feat
def feat_different_type_road_score_sum_mean_by_recall_type_and_item(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
cols = ['i2i_score','blend_score','i2i2i_score','i2iw10_score','i2i2b_score']
for i in range(len(cols)):
feat[cols[i]] = feat['sim_weight']
feat.loc[ feat['recall_type']!=i,cols[i] ] = np.nan
for col in cols:
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].sum().reset_index()
df[col+'_by_item-recall_type_sum'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_sum'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
df = feat[ ['item','recall_type',col,'index'] ]
df = df.groupby(['item','recall_type'])[col].mean().reset_index()
df[col+'_by_item-recall_type_mean'] = df[col]
df = df[ ['item','recall_type',col+'_by_item-recall_type_mean'] ]
feat = pd.merge( feat, df, on=['item','recall_type'], how='left')
feat = feat[[f'{i}_by_item-recall_type_{j}' for i in cols for j in ['sum','mean']]]
return feat
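# feat_base_info_in_stage rebuilds the item-to-item co-occurrence similarity
# separately for every stage up to cur_stage (same loc/time weighting as above)
# and then, per (stage, item), merges back the similarity sum, the co-occurrence
# count, the mean similarity and the number of distinct co-occurring items.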
def feat_base_info_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
#all_train_stage_data = pd.concat( all_train_stage_data.iloc[0:1000], all_train_stage_data.iloc[-10000:] )
df_train_stage = all_train_stage_data
df = data.copy()
feat = df[ ['index','road_item','item','stage'] ]
stage2sim_item = {}
stage2item_cnt = {}
stage2com_item_cnt = {}
for sta in range(cur_stage+1):
df_train = df_train_stage[ df_train_stage['stage']==sta ]
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
sim_item = {}
item_cnt = defaultdict(int)
com_item_cnt = {}
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
item_cnt[item] += 1
sim_item.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
t1 = times[loc1]
t2 = times[loc2]
sim_item[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
sim_item[item][relate_item] += 1 * 1.0 * loc_weight * time_weight / math.log(1 + len(items))
com_item_cnt[item][relate_item] += 1.0
stage2sim_item[sta] = sim_item
stage2item_cnt[sta] = item_cnt
stage2com_item_cnt[sta] = com_item_cnt
sta_list = []
itemb_list = []
sum_sim_list = []
count_sim_list = []
mean_sim_list = []
nunique_itema_count_list = []
for sta in range(cur_stage+1):
for key1 in stage2sim_item[sta].keys():
val = 0
count = 0
for key2 in stage2sim_item[sta][key1].keys():
val += stage2sim_item[sta][key1][key2]
count += stage2com_item_cnt[sta][key1][key2]
sta_list.append( sta )
itemb_list.append( key1 )
sum_sim_list.append( val )
count_sim_list.append( count )
mean_sim_list.append( val/count )
nunique_itema_count_list.append( len( stage2sim_item[sta][key1].keys() ) )
data1 = pd.DataFrame( {'stage':sta_list, 'item':itemb_list, 'sum_sim_in_stage':sum_sim_list, 'count_sim_in_stage':count_sim_list,
'mean_sim_in_stage':mean_sim_list, 'nunique_itema_count_in_stage':nunique_itema_count_list } )
'''
sta_list = []
item_list = []
cnt_list = []
for sta in range(cur_stage+1):
for key1 in stage2item_cnt[sta].keys():
sta_list.append(sta)
item_list.append(key1)
cnt_list.append( stage2item_cnt[sta][key1] )
data2 = pd.DataFrame( {'stage':sta_list, 'road_item':item_list, 'stage_road_item_cnt':cnt_list } )
data3 = pd.DataFrame( {'stage':sta_list, 'item':item_list, 'stage_item_cnt':cnt_list } )
'''
#feat = pd.merge( feat,data1, how='left',on=['stage','road_item','item'] )
#feat = pd.merge( feat,data2, how='left',on=['stage','road_item'] )
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat = feat[ ['sum_sim_in_stage','count_sim_in_stage','mean_sim_in_stage','nunique_itema_count_in_stage'] ]
return feat
def feat_item_time_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','stage','time'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','item_id'] )['time'].agg( ['max','min','mean'] ).reset_index()
data1.columns = [ 'stage','item','time_max_in_stage','time_min_in_stage','time_mean_in_stage' ]
data1['time_dura_in_stage'] = data1['time_max_in_stage'] - data1['time_min_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','item'] )
feat['time_diff_min_in_stage'] = feat['time'] - feat['time_min_in_stage']
feat['time_diff_max_in_stage'] = feat['time_max_in_stage'] - feat['time']
cols = [ 'time_dura_in_stage','time_max_in_stage','time_min_in_stage','time_mean_in_stage','time_diff_min_in_stage','time_diff_max_in_stage' ]
feat = feat[ cols ]
return feat
def feat_user_info_in_stage(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train_stage = all_train_stage_data
data1 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data1.name = 'user_count_in_stage'
data1 = data1.reset_index()
data1 = data1.rename( columns={'user_id':'user'} )
data2 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].nunique()
data2.name = 'item_nunique_in_stage'
data2 = data2.reset_index()
data2 = data2.rename( columns={'item_id':'item'} )
data3 = df_train_stage.groupby( ['stage','item_id'] )['user_id'].count()
data3.name = 'item_count_in_stage'
data3 = data3.reset_index()
data3 = data3.rename( columns={'item_id':'item'} )
data3[ 'item_ratio_in_stage' ] = data3[ 'item_count_in_stage' ] / data2['item_nunique_in_stage']
feat = pd.merge( feat,data1, how='left',on=['stage','user'] )
feat = pd.merge( feat,data2, how='left',on=['stage','item'] )
feat = pd.merge( feat,data3, how='left',on=['stage','item'] )
cols = [ 'user_count_in_stage','item_nunique_in_stage','item_ratio_in_stage' ]
feat = feat[ cols ]
return feat
def feat_item_com_cnt_in_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["road_item", "stage"]]
feat["head"] = feat.set_index(["road_item", "stage"]).index
feat["itema_cnt_in_stage"] = feat["head"].map(item_stage_cnt)
return feat[["itema_cnt_in_stage"]]
def item_cnt_in_stage2(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
return feat[["item_stage_cnt"]]
def feat_item_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["item"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
item_stage_cnt = train_stage_data.groupby(['item_id'])['index'].count()
item_stage_cnt.name = f"item_stage_cnt_{sta}"
item_stage_cnt = item_stage_cnt.reset_index()
item_stage_cnt.columns = ['item',f"item_stage_cnt_{sta}"]
feat = pd.merge( feat,item_stage_cnt,how='left',on='item' )
cols.append( f"item_stage_cnt_{sta}" )
#import pdb
#pdb.set_trace()
return feat[ cols ]
def feat_user_cnt_in_different_stage(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
feat = data[["user"]]
cols = []
for sta in range(cur_stage+1):
train_stage_data = all_train_stage_data[ all_train_stage_data['stage']==sta ]
user_stage_cnt = train_stage_data.groupby(['user_id'])['index'].count()
user_stage_cnt.name = f"user_stage_cnt_{sta}"
user_stage_cnt = user_stage_cnt.reset_index()
user_stage_cnt.columns = ['user',f"user_stage_cnt_{sta}"]
feat = pd.merge( feat,user_stage_cnt,how='left',on='user' )
cols.append( f"user_stage_cnt_{sta}" )
#import pdb
#pdb.set_trace()
return feat[ cols ]
def feat_user_and_item_count_in_three_init_data(data):
df = data.copy()
feat = df[ ['index','item','user','stage'] ]
if mode=='valid':
df_train_stage = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
else:
df_train_stage = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
data1 = df_train_stage.groupby( ['stage','item_id'] )['index'].count()
data1.name = 'in_stage_item_count'
data1 = data1.reset_index()
data1 = data1.rename( columns = {'item_id':'item'} )
data2 = df_train_stage.groupby( ['stage','user_id'] )['index'].count()
data2.name = 'in_stage_user_count'
data2 = data2.reset_index()
data2 = data2.rename( columns = {'user_id':'user'} )
data3 = df_train_stage.groupby( ['item_id'] )['index'].count()
data3.name = 'no_in_stage_item_count'
data3 = data3.reset_index()
data3 = data3.rename( columns = {'item_id':'item'} )
data4 = df_train_stage.groupby( ['user_id'] )['index'].count()
data4.name = 'no_in_stage_user_count'
data4 = data4.reset_index()
data4 = data4.rename( columns = {'user_id':'user'} )
data5 = df_train.groupby( ['item_id'] )['index'].count()
data5.name = 'no_stage_item_count'
data5 = data5.reset_index()
data5 = data5.rename( columns = {'item_id':'item'} )
data6 = df_train.groupby( ['user_id'] )['index'].count()
data6.name = 'no_stage_user_count'
data6 = data6.reset_index()
data6 = data6.rename( columns = {'user_id':'user'} )
feat = pd.merge( feat,data1,how='left',on=['stage','item'] )
feat = pd.merge( feat,data2,how='left',on=['stage','user'] )
feat = pd.merge( feat,data3,how='left',on=['item'] )
feat = pd.merge( feat,data4,how='left',on=['user'] )
feat = pd.merge( feat,data5,how='left',on=['item'] )
feat = pd.merge( feat,data6,how='left',on=['user'] )
cols = [ 'in_stage_item_count','in_stage_user_count','no_in_stage_item_count','no_in_stage_user_count','no_stage_item_count','no_stage_user_count' ]
return feat[ cols ]
#def feat_item_count_in_three_init_data(data):
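# For the two-hop similarities below, each record in feat_i2i2i_seq / feat_i2i2b_seq
# is (middle item, score1_1, score1_2, score2_1, score2_2); the feature averages the
# two score products and the popularity of the middle item over all records of a
# (road_item, item) key, plus the number of such records.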
def feat_i2i2i_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2i_sim_seq')
i2i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2i_sim_seq.keys():
continue
records = i2i2i_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
result[:,1] /= (result[:,0]+1e-9)
result[:,2] /= (result[:,0]+1e-9)
result[:,3] /= (result[:,0]+1e-9)
print('Finished getting result')
cols = ['i2i2i_road_cnt','i2i2i_score1_mean','i2i2i_score2_mean','i2i2i_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_i2i2b_sim(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i2b_sim_seq')
i2i2b_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i2b_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i2b_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
result = np.zeros((len(new_keys),4))
item_cnt = df_train['item_id'].value_counts().to_dict()
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i2b_sim_seq.keys():
continue
records = i2i2b_sim_seq[key]
result[i,0] = len(records)
if len(records)==0:
print(key)
for record in records:
item,score1_1,score1_2,score2_1,score2_2 = record
result[i,1] += score1_1*score1_2
result[i,2] += score2_1*score2_2
result[i,3] += item_cnt[item]
result[:,1] /= (result[:,0]+1e-9)
result[:,2] /= (result[:,0]+1e-9)
result[:,3] /= (result[:,0]+1e-9)
print('Finished getting result')
cols = ['i2i2b_road_cnt','i2i2b_score1_mean','i2i2b_score2_mean','i2i2b_middle_item_cnt_mean']
result = pd.DataFrame(result,index=new_keys,columns=cols)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
feat = feat[ cols ]
return feat
def feat_numerical_groupby_item_cnt_in_stage(data):
df = data.copy()
num_cols = [ 'sim_weight', 'loc_weight', 'time_weight', 'rank_weight' ]
cate_col = 'item_stage_cnt'
feat = df[ ['index','road_item','item'] ]
feat1 = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'item_cnt_in_stage2_{mode}_{cur_stage}.pkl') )
df[ cate_col ] = feat1[ cate_col ]
feat[ cate_col ] = feat1[ cate_col ]
cols = []
for col in num_cols:
t = df.groupby(cate_col)[col].agg( ['mean','max','min'] )
cols += [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t.columns = [ f'{col}_{i}_groupby_{cate_col}' for i in ['mean','max','min'] ]
t = t.reset_index()
feat = pd.merge( feat, t, how='left', on=cate_col )
return feat[ cols ]
#i2i_score,
#
def feat_item_stage_nunique(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_nunique = all_train_stage_data.groupby(["item_id"])["stage"].nunique()
feat = data[["item"]]
feat["item_stage_nunique"] = feat["item"].map(item_stage_nunique)
return feat[["item_stage_nunique"]]
def feat_item_qtime_time_diff(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result_history = np.zeros(df_v.shape[0])*np.nan
result_future = np.zeros(df_v.shape[0])*np.nan
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = [0]+item_time_list[df_v[i,0]]+[1]
for j in range(1,len(time_list)):
if time<time_list[j]:
result_future[i] = time_list[j]-time
result_history[i] = time-time_list[j-1]
break
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
result = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = df_v[i,1]
time_list = item_time_list[df_v[i,0]]+[1]
for j in range(len(time_list)):
if time<time_list[j]:
result[i] = j
break
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
def feat_road_time_bins_cate_cnt(data):
df = data.copy()
categoricals = ['item','road_item','user','recall_type']
feat = df[['road_item_time']+categoricals]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
categoricals.append('loc_diff')
feat['road_time_bins'] = pd.Categorical(pd.cut(feat['road_item_time'],100)).codes
cols = []
for cate in categoricals:
cnt = feat.groupby([cate,'road_time_bins']).size()
cnt.name = f'{cate}_cnt_by_road_time_bins'
cols.append(cnt.name)
feat = feat.merge(cnt,how='left',on=[cate,'road_time_bins'])
return feat[cols]
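# The redefinition below is a vectorized version of feat_time_window_cate_count:
# for each row it lines up the query time and the shifted times t +/- delta on one
# sorted axis, sweeps the item's sorted click-time list once with a two-pointer
# scan to get cumulative counts at every boundary, and then takes differences to
# recover the before / after / around window counts for all deltas at once.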
def feat_time_window_cate_count(data):
# Before building this feature, run item2time.py once.
import time as ti
t = ti.time()
df = data.copy()
feat = df[['item','query_item_time']]
df_v = feat.values
del df
try:
item_time_list = utils.load_pickle(item2time_path.format(mode, cur_stage))
except:
raise Exception("做这个特征之前,先做一次item2time.py")
delta_list = np.array(sorted([0.01, 0.02, 0.05, 0.07, 0.1, 0.15]))
delta_list2 = delta_list[::-1]
delta_n = delta_list.shape[0]
n = delta_n*2+1
result_tmp = np.zeros((df_v.shape[0],n))
result_equal = np.zeros(df_v.shape[0])
for i in range(df_v.shape[0]):
time = np.ones(n)*df_v[i,1]
time[:delta_n] -= delta_list2
time[-delta_n:] += delta_list
time_list = item_time_list[df_v[i,0]]+[10]
k = 0
for j in range(len(time_list)):
while k<n and time[k]<time_list[j] :
result_tmp[i,k] = j
k += 1
if time[delta_n]==time_list[j]:
result_equal[i] += 1
result_tmp[i,k:] = j
if i%100000 == 0:
print(f'[{i}/{df_v.shape[0]}]:time {ti.time()-t:.3f}s')
t = ti.time()
result = np.zeros((df_v.shape[0],delta_n*3))
for i in range(delta_n):
result[:,i*3+0] = result_tmp[:,delta_n] - result_tmp[:,i]
result[:,i*3+1] = result_tmp[:,-(i+1)] - result_tmp[:,delta_n] + result_equal
result[:,i*3+2] = result_tmp[:,-(i+1)] - result_tmp[:,i]
cols = [f'item_cnt_{j}_time_{i}' for i in delta_list2 for j in ['before','after','around']]
result = pd.DataFrame(result,columns=cols)
result = result[[
"item_cnt_around_time_0.01", "item_cnt_before_time_0.01", "item_cnt_after_time_0.01",
"item_cnt_around_time_0.02", "item_cnt_before_time_0.02", "item_cnt_after_time_0.02",
"item_cnt_around_time_0.05", "item_cnt_before_time_0.05", "item_cnt_after_time_0.05",
"item_cnt_around_time_0.07", "item_cnt_before_time_0.07", "item_cnt_after_time_0.07",
"item_cnt_around_time_0.1", "item_cnt_before_time_0.1", "item_cnt_after_time_0.1",
"item_cnt_around_time_0.15", "item_cnt_before_time_0.15", "item_cnt_after_time_0.15",
]]
return result
def feat_road_item_text_norm2(data):  # def line was missing here; name inferred from the matching definition further below
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = max(item_feat.keys())+1
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros((n,3))
result[:,1] = item_l2[feat['road_item']]
result[:,2] = item_l2[feat['item']]
result[:,0] = result[:,1]*result[:,2]
feat['road_item_text_product_norm2'] = result[:,0]
feat['road_item_text_norm2'] = result[:,1]
feat['item_text_norm2'] = result[:,2]
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_product_norm2'] = np.nan
feat.loc[(~feat['road_item'].isin(item_feat.keys())),'road_item_text_norm2'] = np.nan
feat.loc[(~feat['item'].isin(item_feat.keys())),'item_text_norm2'] = np.nan
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
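# The *_text_* features below compare item embeddings: item_feat maps an item id to
# its feature vectors, and v[0] (128-dimensional) is used here as the text embedding.
# Rows whose item or road_item has no entry in item_feat are set to NaN.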
def feat_road_item_text_cossim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.multiply(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:],item_np[feat['item'][i*batch_size:(i+1)*batch_size],:]).sum(axis=1)
result = np.divide(result,item_l2[feat['road_item']]*item_l2[feat['item']]+1e-9)
feat['road_item_text_cossim'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_cossim'] = np.nan
return feat[['road_item_text_cossim']]
def feat_road_item_text_eulasim(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.linalg.norm(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:]-item_np[feat['item'][i*batch_size:(i+1)*batch_size],:],axis=1)
feat['road_item_text_eulasim'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_eulasim'] = np.nan
return feat[['road_item_text_eulasim']]
def feat_road_item_text_dot(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros(n)
batch_size = 100000
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
for i in range(batch_num):
result[i*batch_size:(i+1)*batch_size] = np.multiply(item_np[feat['road_item'][i*batch_size:(i+1)*batch_size],:],item_np[feat['item'][i*batch_size:(i+1)*batch_size],:]).sum(axis=1)
feat['road_item_text_dot'] = result
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_dot'] = np.nan
return feat[['road_item_text_dot']]
def feat_road_item_text_norm2(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
item_text = {}
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
item_l2 = np.linalg.norm(item_np,axis=1)
n = feat.shape[0]
result = np.zeros((n,3))
result[:,1] = item_l2[feat['road_item']]
result[:,2] = item_l2[feat['item']]
result[:,0] = result[:,1]*result[:,2]
feat['road_item_text_product_norm2'] = result[:,0]
feat['road_item_text_norm2'] = result[:,1]
feat['item_text_norm2'] = result[:,2]
feat.loc[(~feat['item'].isin(item_feat.keys()))|(~feat['road_item'].isin(item_feat.keys())),'road_item_text_product_norm2'] = np.nan
feat.loc[(~feat['road_item'].isin(item_feat.keys())),'road_item_text_norm2'] = np.nan
feat.loc[(~feat['item'].isin(item_feat.keys())),'item_text_norm2'] = np.nan
feat = feat[ ['road_item_text_product_norm2','road_item_text_norm2','item_text_norm2'] ]
return feat
def feat_i2i_cijs_topk_by_loc(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
#result[i] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( (loc_diff,1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len)) )
result[i,1]/=(result[i,3]+1e-5)
result[i,0]/=(result[i,2]+1e-5)
result_one = sorted(result_one,key=lambda x:x[0])
result_one_len = len(result_one)
result[i,4:] = [x[1] for x in result_one[:topk]] + [np.nan]*max(0,topk-result_one_len)
cols = ['history_loc_diff1_com_item_time_mean',
'future_loc_diff1_com_item_time_mean',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']+[f'i2i_cijs_top{k}_by_loc' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
def feat_i2i_cijs_median_mean_topk(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
#median,mean,topk
result = np.zeros((len(new_keys),2+topk))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
result_one = []
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result_one.append( 1 * 1.0 * loc_weight * time_weight / math.log(1 + record_len))
result_one = sorted(result_one,reverse=True)
result_one_len = len(result_one)
result[i,0] = result_one[result_one_len//2] if result_one_len%2==1 else (result_one[result_one_len//2]+result_one[result_one_len//2-1])/2
result[i,1] = sum(result_one)/(len(result_one))
result[i,2:] = result_one[:topk]+[np.nan]*max(0,topk-result_one_len)
cols = ['i2i_cijs_median','i2i_cijs_mean']+[f'i2i_cijs_top{k}_by_cij' for k in range(1,topk+1)]
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ cols ]
return feat
def feat_different_type_road_score_sum_mean(data):
df = data.copy()
feat = df[ ['user','item','index','sim_weight','recall_type'] ]
feat['i2i_score'] = feat['sim_weight']
feat['blend_score'] = feat['sim_weight']
feat['i2i2i_score'] = feat['sim_weight']
feat.loc[ feat['recall_type']!=0 , 'i2i_score'] = np.nan
feat.loc[ feat['recall_type']!=1 , 'blend_score'] = np.nan
feat.loc[ feat['recall_type']!=2 , 'i2i2i_score'] = np.nan
df = feat[ ['index','user','item','i2i_score','blend_score','i2i2i_score'] ]
df = df.groupby( ['user','item'] )[ ['i2i_score','blend_score','i2i2i_score'] ].agg( ['sum','mean'] ).reset_index()
df.columns = ['user','item'] + [ f'{i}_{j}' for i in ['i2i_score','blend_score','i2i2i_score'] for j in ['sum','mean'] ]
feat = pd.merge( feat, df, on=['user','item'], how='left')
feat = feat[ ['i2i_score','i2i_score_sum','i2i_score_mean',
'blend_score','blend_score_sum','blend_score_mean',
'i2i2i_score','i2i2i_score_sum','i2i2i_score_mean',] ]
return feat
def feat_automl_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
cols = []
for cate1 in ['recall_type']:
for cate2 in ['item','road_item']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
tmp = feat.groupby(['recall_type','road_item','item']).size()
tmp.name = 'recall_type-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['recall_type','road_item','item'])
cols.append(tmp.name)
print('feat recall_type road_item item done')
feat = feat[ cols ]
return feat
def feat_automl_loc_diff_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type'] ]
feat['loc_diff'] = df['query_item_loc']-df['road_item_loc']
cols = []
for cate1 in ['loc_diff']:
for cate2 in ['item','road_item','recall_type']:
name2 = f'{cate1}-{cate2}'
feat_tmp = feat.groupby([cate1,cate2]).size()
feat_tmp.name = f'{name2}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2])
cols.append( name2+'_count' )
print(f'feat {cate1} {cate2} done')
tmp = feat.groupby(['loc_diff','road_item','item']).size()
tmp.name = 'loc_diff-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['loc_diff','road_item','item'])
cols.append(tmp.name)
print('feat loc_diff road_item item done')
feat = feat[ cols ]
return feat
def feat_automl_user_and_recall_type_cate_count(data):
df = data.copy()
feat = df[ ['index','item','road_item','recall_type','user'] ]
cols = []
for cate1 in ['user']:
for cate2 in ['recall_type']:
for cate3 in ['item','road_item']:
name3 = f'{cate1}-{cate2}-{cate3}'
feat_tmp = feat.groupby([cate1,cate2,cate3]).size()
feat_tmp.name = f'{name3}_count'
feat = feat.merge(feat_tmp,how='left',on=[cate1,cate2,cate3])
cols.append( name3+'_count' )
print(f'feat {cate1} {cate2} {cate3} done')
tmp = feat.groupby(['user','recall_type','road_item','item']).size()
tmp.name = 'user-recall_type-road_item-item_count'
feat = feat.merge(tmp,how='left',on=['user','recall_type','road_item','item'])
cols.append(tmp.name)
print('feat user recall_type road_item item done')
feat = feat[ cols ]
return feat
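# The two functions below are faster re-implementations of feat_item_cumcount and
# feat_item_qtime_time_diff: queries are grouped by item and sorted by query time,
# so each item's click-time list and its query times are merged in a single pass
# instead of scanning the click list once per row.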
def feat_item_cumcount(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
for i,v in item_time_list.items():
item_time_list[i] = np.array(v+[1])
df = data.copy()
feat = df[['index','item','query_item_time']]
tmp = feat.set_index('item')
tmp = tmp.sort_values('query_item_time')
tmp = tmp.groupby(['item']).apply(np.array)
result = np.zeros(df.shape[0])
for i,v in tmp.items():
time_list = item_time_list[i]
k = 0
item_n = v.shape[0]
for j in range(len(time_list)):
while k<item_n and v[k,1]<time_list[j]:
result[int(v[k,0])] = j
k += 1
feat['item_cumcount'] = result
feat['item_cumrate'] = feat['item_cumcount']/feat['item'].map(df_train['item_id'].value_counts()).fillna(1e-5)
return feat[['item_cumcount','item_cumrate']]
def feat_item_qtime_time_diff(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
item_time_list = df_train.sort_values('time').groupby('item_id',sort=False)['time'].agg(list)
for i,v in item_time_list.items():
item_time_list[i] = np.array([0]+v+[1])
df = data.copy()
feat = df[['index','item','query_item_time']]
tmp = feat.set_index('item')
tmp = tmp.sort_values('query_item_time')
tmp = tmp.groupby(['item']).apply(np.array)
result_history = np.zeros(df.shape[0])*np.nan
result_future = np.zeros(df.shape[0])*np.nan
for i,v in tmp.items():
time_list = item_time_list[i]
k = 0
item_n = v.shape[0]
for j in range(1,len(time_list)):
while k<item_n and v[k,1]<time_list[j]:
result_future[int(v[k,0])] = time_list[j]-v[k,1]
result_history[int(v[k,0])] = v[k,1]-time_list[j-1]
k += 1
feat['item_qtime_time_diff_history'] = result_history
feat['item_qtime_time_diff_future'] = result_future
return feat[['item_qtime_time_diff_history','item_qtime_time_diff_future']]
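# feat_sim_three_weight_no_clip accumulates the same loc/time weights as the i2i
# similarity above but without flooring them at 0.2, and also sums the history
# length (len(items)) of every co-occurrence; (road_item, item) pairs that never
# co-occur get NaN.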
def feat_sim_three_weight_no_clip(data):
df = data.copy()
feat = df[ ['index','road_item','item'] ]
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
loc_weights = {}
time_weights = {}
record_weights = {}
com_item_cnt = {}
item_set = set()
item_dict_set = {}
st0 = time.time()
for user, items in user_item_dict.items():
for item in items:
item_set.add(item)
item_dict_set[item] = set()
for user, items in user_item_dict.items():
times = user_time_dict[user]
for loc1, item in enumerate(items):
loc_weights.setdefault(item, {})
time_weights.setdefault(item, {})
record_weights.setdefault(item, {})
com_item_cnt.setdefault(item, {})
for loc2, relate_item in enumerate(items):
if item == relate_item:
continue
item_dict_set[ item ].add( relate_item )
t1 = times[loc1]
t2 = times[loc2]
loc_weights[item].setdefault(relate_item, 0)
time_weights[item].setdefault(relate_item, 0)
record_weights[item].setdefault(relate_item, 0)
com_item_cnt[item].setdefault(relate_item, 0)
if loc1-loc2>0:
time_weight = (1 - (t1 - t2) * 100)
loc_diff = loc1-loc2-1
loc_weight = (0.9**loc_diff)
else:
time_weight = (1 - (t2 - t1) * 100)
loc_diff = loc2-loc1-1
loc_weight = (0.9**loc_diff)
loc_weights[item][relate_item] += loc_weight
time_weights[item][relate_item] += time_weight
record_weights[item][relate_item] += len(items)
com_item_cnt[item][relate_item] += 1
st1 = time.time()
print(st1-st0)
print('start')
num = feat.shape[0]
road_item = feat['road_item'].values
t_item = feat['item'].values
com_item_loc_weights_sum = np.zeros( num, dtype=float )
com_item_time_weights_sum = np.zeros( num, dtype=float )
com_item_record_weights_sum = np.zeros( num, dtype=float )
t_com_item_cnt = np.zeros( num, dtype=float )
for i in range(num):
if road_item[i] in item_set:
if t_item[i] in item_dict_set[ road_item[i] ]:
com_item_loc_weights_sum[i] = loc_weights[ road_item[i] ][ t_item[i] ]
com_item_time_weights_sum[i] = time_weights[ road_item[i] ][ t_item[i] ]
com_item_record_weights_sum[i] = record_weights[ road_item[i] ][ t_item[i] ]
t_com_item_cnt[i] = com_item_cnt[ road_item[i] ][ t_item[i] ]
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
else:
com_item_loc_weights_sum[i] = np.nan
com_item_time_weights_sum[i] = np.nan
com_item_record_weights_sum[i] = np.nan
t_com_item_cnt[i] = np.nan
feat['com_item_loc_weights_sum_no_clip'] = com_item_loc_weights_sum
feat['com_item_time_weights_sum_no_clip'] = com_item_time_weights_sum
feat['com_item_record_weights_sum'] = com_item_record_weights_sum
feat['com_item_cnt'] = t_com_item_cnt
feat['com_item_loc_weights_mean_no_clip'] = feat['com_item_loc_weights_sum_no_clip'] / feat['com_item_cnt']
feat['com_item_time_weights_mean_no_clip'] = feat['com_item_time_weights_sum_no_clip'] / feat['com_item_cnt']
feat['com_item_record_weights_mean'] = feat['com_item_record_weights_sum'] / feat['com_item_cnt']
feat = feat[ ['com_item_loc_weights_sum_no_clip','com_item_time_weights_sum_no_clip',
'com_item_loc_weights_mean_no_clip','com_item_time_weights_mean_no_clip', ] ]
st2 = time.time()
print(st2-st1)
return feat
def feat_u2i_road_item_before_and_after_query_time_diff(data):
df = data.copy()
feat = df[['user','road_item_loc','road_item_time','query_item_time']]
feat_h = feat.loc[feat['road_item_time']<feat['query_item_time']]
feat_f = feat.loc[feat['road_item_time']>feat['query_item_time']]
feat_h = feat_h.groupby(['user','road_item_loc']).first().reset_index()
feat_f = feat_f.groupby(['user','road_item_loc']).first().reset_index()
feat_h_group = feat_h.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat_f_group = feat_f.sort_values(['user','road_item_loc']).set_index(['user','road_item_loc']).groupby('user')
feat1 = feat_h_group['road_item_time'].diff(1)
feat2 = feat_h_group['road_item_time'].diff(-1)
feat3 = feat_f_group['road_item_time'].diff(1)
feat4 = feat_f_group['road_item_time'].diff(-1)
feat1.name = 'u2i_road_item_before_query_time_diff_history'
feat2.name = 'u2i_road_item_before_query_time_diff_future'
feat3.name = 'u2i_road_item_after_query_time_diff_history'
feat4.name = 'u2i_road_item_after_query_time_diff_future'
feat = df.merge(pd.concat([feat1,feat2,feat3,feat4],axis=1),how='left',on=['user','road_item_loc'])
cols = ['u2i_road_item_before_query_time_diff_history',
'u2i_road_item_before_query_time_diff_future',
'u2i_road_item_after_query_time_diff_history',
'u2i_road_item_after_query_time_diff_future']
feat = feat[ cols ]
return feat
def feat_i2i_cijs_topk_by_loc_new(data):
if mode == 'valid':
df_train = utils.load_pickle(all_train_data_path.format(cur_stage))
elif mode == 'test':
df_train = utils.load_pickle(online_all_train_data_path.format(cur_stage))
user_item_ = df_train.groupby('user_id')['item_id'].agg(list).reset_index()
user_item_dict = dict(zip(user_item_['user_id'], user_item_['item_id']))
user_time_ = df_train.groupby('user_id')['time'].agg(list).reset_index()
user_time_dict = dict(zip(user_time_['user_id'], user_time_['time']))
item_cnt = defaultdict(int)
for user, items in user_item_dict.items():
for loc1, item in enumerate(items):
item_cnt[item] += 1
df = data.copy()
feat = df[ ['index','road_item','item'] ]
print('Loading i2i_sim_seq')
i2i_sim_seq = utils.load_pickle( feat_dir + f'{used_recall_source}/' + (f'feat_i2i_seq_{mode}_{cur_stage}.pkl') )
print('Finished i2i_sim_seq')
    print('Create new key')
vals = feat[ ['road_item', 'item'] ].values
new_keys = []
for val in vals:
new_keys.append( (val[0], val[1]) )
feat['new_keys'] = new_keys
new_keys = sorted( list( set(new_keys) ) )
print('Finished new key')
print('Getting result')
topk = 3
loc_base = 0.9
print(f'Starting {loc_base}')
result = np.zeros((len(new_keys),4))
for i in range(len(new_keys)):
key = new_keys[i]
if key not in i2i_sim_seq.keys():
result[i,:] = np.nan
continue
records = i2i_sim_seq[key]
if len(records)==0:
print(key)
for record in records:
loc1, loc2, t1, t2, record_len = record
if loc1-loc2>0:
if loc1-loc2==1:
result[i,2] += 1
result[i,0] += (t1 - t2)
time_weight = (1 - (t1 - t2) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc1-loc2-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
else:
if loc2-loc1==1:
result[i,3] += 1
result[i,1] += (t2 - t1)
time_weight = (1 - (t2 - t1) * 100)
if time_weight<=0.2:
time_weight = 0.2
loc_diff = loc2-loc1-1
loc_weight = (loc_base**loc_diff)
if loc_weight <= 0.2:
loc_weight = 0.2
result[:,1]/=(result[:,3]+1e-5)
result[:,0]/=(result[:,2]+1e-5)
cols = ['history_loc_diff1_com_item_time_mean_new',
'future_loc_diff1_com_item_time_mean_new',
'history_loc_diff1_com_item_cnt',
'future_loc_diff1_com_item_cnt']
result = pd.DataFrame(result,columns=cols,index=new_keys)
result = result.reset_index()
result.rename(columns={'index':'new_keys'},inplace=True)
feat = feat.merge(result,how='left',on='new_keys')
print('Finished getting result')
feat = feat[ ['history_loc_diff1_com_item_time_mean_new','future_loc_diff1_com_item_time_mean_new'] ]
return feat
def feat_items_list_len(data):
df = data.copy()
feat = df[ ['index','user','left_items_list','right_items_list','stage'] ]
def func(s):
return len(s)
tdata = feat.groupby('user').first()
tdata['left_items_list_len'] = tdata['left_items_list'].apply( func )
tdata['right_items_list_len'] = tdata['right_items_list'].apply( func )
    # map the per-user list lengths back onto every candidate row and return them
    feat = feat.merge( tdata[ ['left_items_list_len','right_items_list_len'] ].reset_index(), how='left', on='user' )
    feat = feat[ ['left_items_list_len','right_items_list_len'] ]
    return feat
def feat_item_cnt_in_stage2_mean_max_min_by_user(data):
if mode=='valid':
all_train_stage_data = utils.load_pickle(all_train_stage_data_path.format(cur_stage))
else:
all_train_stage_data = utils.load_pickle(online_all_train_stage_data_path.format(cur_stage))
item_stage_cnt = all_train_stage_data.groupby(["item_id"])["stage"].value_counts().to_dict()
feat = data[["user","item", "stage"]]
feat["head"] = feat.set_index(["item", "stage"]).index
feat["item_stage_cnt"] = feat["head"].map(item_stage_cnt)
tmp = feat.groupby('user')['item_stage_cnt'].agg(['mean','max','min'])
tmp.columns = [f'item_cnt_in_stage2_{i}_by_user' for i in tmp.columns]
feat = feat.merge(tmp,how='left',on='user')
return feat[tmp.columns]
def feat_item_seq_sim_cossim_text(data):
df = data.copy()
feat = df[ ['left_items_list','right_items_list','item'] ]
item_feat = utils.load_pickle(item_feat_pkl)
item_n = 120000
item_np = np.zeros((item_n,128))
for k,v in item_feat.items():
item_np[k,:] = v[0]
all_items = np.array(sorted(item_feat.keys()))
item_np = item_np/(np.linalg.norm(item_np,axis=1,keepdims=True)+1e-9)
batch_size = 10000
n = len(feat)
batch_num = n//batch_size if n%batch_size==0 else n//batch_size+1
feat['left_len'] = feat['left_items_list'].apply(len)
feat_left = feat.sort_values('left_len')
feat_left_len = feat_left['left_len'].values
feat_left_items_list = feat_left['left_items_list'].values
feat_left_items = feat_left['item'].values
left_result = np.zeros((len(feat_left),2))
left_result_len = np.zeros(len(feat_left))
for i in range(batch_num):
cur_batch_size = len(feat_left_len[i*batch_size:(i+1)*batch_size])
max_len = feat_left_len[i*batch_size:(i+1)*batch_size].max()
max_len = max(max_len,1)
left_items = np.zeros((cur_batch_size,max_len),dtype='int32')
for j,arr in enumerate(feat_left_items_list[i*batch_size:(i+1)*batch_size]):
left_items[j][:len(arr)] = arr
left_result_len[i*batch_size:(i+1)*batch_size] = np.isin(left_items,all_items).sum(axis=1)
vec1 = item_np[left_items]
vec2 = item_np[feat_left_items[i*batch_size:(i+1)*batch_size]]
vec2 = vec2.reshape(-1,1,128)
sim = np.sum(vec1*vec2,axis=-1)
left_result[i*batch_size:(i+1)*batch_size,0] = sim.max(axis=1)
left_result[i*batch_size:(i+1)*batch_size,1] = sim.sum(axis=1)
if i % 10 == 0:
print('batch num',i)
df_left = | pd.DataFrame(left_result,index=feat_left.index,columns=['left_allitem_item_textsim_max','left_allitem_item_textsim_sum']) | pandas.DataFrame |
# TODO make it handle missing data
from __future__ import unicode_literals
__all__ = [
'clean_FIPS',
'fix_FIPS',
'get_custom_bins',
'make_choropleth',
'AreaPopDataset',
'CityInfo',
'CityLabel',
'ChoroplethStyle',
'Choropleth'
]
import geopandas as gpd
import numpy as np
import pandas as pd
import matplotlib
import os
import textwrap
import re
import math
from six import string_types
from matplotlib import pyplot as plt, patches as mpatches
from matplotlib.colors import LinearSegmentedColormap, ListedColormap, hex2color
def clean_FIPS(FIPS_code):
'''Converts a number sequence to a string and removes alphanumeric
characters.'''
FIPS_code = str(FIPS_code)
    FIPS_code = re.sub(r'[\W_]+', '', FIPS_code)
    if re.match(r'^[0-9]*$', FIPS_code) is None:
raise ValueError('Data contains non-digit FIPS code values')
return FIPS_code
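# Illustrative usage sketch (the _example_* helper is hypothetical, not part of the
# public API above): clean_FIPS strips separators/underscores and raises on letters.
def _example_clean_FIPS():
    assert clean_FIPS('25-025') == '25025'   # separators are removed
    assert clean_FIPS(25025) == '25025'      # numeric input is converted to str
    return clean_FIPS('25-025')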
def fix_FIPS(data, county_col, state_FIPS=None):
'''Takes FIPS data and outputs a dataframe with a FIPS column containing
5-digit, merged, state and county FIPS codes.
Args:
data(pandas DataFrame): data with FIPS columns
county_col(str): name of column with county FIPS codes.
        state_FIPS(str): either the name of the column with state FIPS codes
            or a 2-digit state FIPS code in string format, e.g. '25';
            can be None if county_col already has combined, 5-digit codes
Returns:
data(pandas DataFrame): with combined FIPS column added'''
FIPS_col = 'FIPS' # name of the FIPS column to be added
# Check for null values
if data[county_col].isnull().values.any():
raise ValueError('Data contains empty FIPS code values.')
# Clean the county codes
data[county_col] = data.loc[:, county_col].map(
lambda x: clean_FIPS(x))
data[county_col] = data.loc[:, county_col].apply(
lambda x: x.zfill(3) if len(x) < 3 else x)
# check if state codes need to be added
if (data[county_col].str.len() == 3).any():
if state_FIPS in data.columns: # if a column name is entered
state_col = state_FIPS
else:
state_FIPS = clean_FIPS(state_FIPS)
if len(state_FIPS) != 2:
raise ValueError('Data contains State FIPS not in a ' +
'readable format. Entry must be a string ' +
'column name or a 2-digit state FIPS code')
state_col = 'state_FIPS'
data[state_col] = state_FIPS # create a state FIPS column
data[state_col] = data.loc[:, state_col].map(
lambda x: clean_FIPS(x)) # clean the state FIPS column
data[county_col] = data.loc[:, county_col].apply(
lambda x: x[-3:]) # Make it all consistent
data[county_col] = data[state_col].str.cat(data[county_col], sep='')
    # Re-pad to 5 digits in case cleaning dropped leading zeros,
    # then check that the combined codes are the right length
data[county_col] = data[county_col].str.zfill(5)
if (data[county_col].str.len() == 5).all(): # codes already combined
data[FIPS_col] = data[county_col]
else:
raise ValueError(
'Data contains FIPS code values that violate length ' +
            'requirements. Entries should be a 3-digit county code ' +
'or a 5-digit state and county code.')
return data
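# Illustrative usage sketch (hypothetical helper and toy data): a tiny frame of
# short county codes combined with a single 2-digit state code string.
def _example_fix_FIPS():
    df = pd.DataFrame({'county': ['25', '17', '9']})
    out = fix_FIPS(df, county_col='county', state_FIPS='25')
    # county codes are zero-padded to 3 digits and prefixed with the state code
    assert list(out['FIPS']) == ['25025', '25017', '25009']
    return out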
def round_py2(x, d=0):
'''rounds up--Python2 and Python3 have different rounding behavior'''
p = 10 ** d
return float(math.floor((x * p) + math.copysign(0.5, x)))/p
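# Illustrative check (hypothetical helper): Python 3's built-in round() uses banker's
# rounding (round(2.5) == 2), whereas round_py2 rounds halves away from zero.
def _example_round_py2():
    assert round_py2(2.5) == 3.0
    assert round_py2(-2.5) == -3.0
    assert round_py2(0.125, 2) == 0.13
    return round_py2(2.5)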
# TODO decide if you want this out of the object
def get_custom_bins(level, num_cats=4, dif=.1, direction=None, precision=1):
'''Creates percent cutoff bins a certain amount away from an index
Args:
level(float): index marker, fractions will be multiplied by 100.
Unless specified, the level acts as the midpoint. If the
direction is 'pos', the level acts as the lower endpoint for the
second lowest category_number. The level should not be zero;
negative levels are not allowed.
dif(float): how much the other cats should step up or down.
This is a multiplier.
num_cats(int): how many categories to split the bins into
direction(str): None or pos; a positive direction makes bins from
0 to level; otherwise level is the midpoint
precision(int): what to round to
Returns:
bins(list[float]): list of bin cutoff points
'''
# Negative levels are not allowed
if float(level) <= 0:
raise ValueError(
'Level is less than or equal to zero.' +
'get_custom_bins only makes positive categories.')
# Levels are assumed to be percentages
if float(level) < 1:
level = float(level) * 100.0
level = round(level, 1)
plus_mult = 1
minus_mult = 1
mid = int(round_py2(float(num_cats)/2)) # In case there's an odd number
if direction == 'pos':
bins_dict = {0: 0.0, 1: level, num_cats: 100.0}
for i in range(2, num_cats):
plus_mult += dif
bins_dict[i] = round(level*plus_mult, precision)
else: # direction is None
bins_dict = {0: 0.0, mid: float(level)}
for i in range(1, mid):
plus_mult += dif
minus_mult -= dif
bins_dict[mid + i] = round(level*plus_mult, 1)
bins_dict[mid - i] = round(level*minus_mult, 1)
bins_dict[num_cats] = 100.0
bins = []
for j in sorted(bins_dict, key=bins_dict.get):
bins.append(bins_dict[j])
return bins
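# Illustrative usage sketch (hypothetical helper): with the defaults the level sits at
# the midpoint of the cutoffs; with direction='pos' it becomes the lower edge of the
# second bin.
def _example_get_custom_bins():
    assert get_custom_bins(10, num_cats=4, dif=.1) == [0.0, 9.0, 10.0, 11.0, 100.0]
    assert get_custom_bins(0.1, num_cats=4, direction='pos') == [0.0, 10.0, 11.0, 12.0, 100.0]
    return get_custom_bins(10)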
def axis_data_coords_sys_transform(ax_obj_in, xin, yin, inverse=False):
'''Goes between axis and data coordinate systems
Args:
        ax_obj_in(matplotlib axes object): the axes instance in use
xin(float): x to transform
yin(float): y to transform
inverse(bool):
inverse = False : Axis => Data
True : Data => Axis
'''
xlim = ax_obj_in.get_xlim()
ylim = ax_obj_in.get_ylim()
xdelta = xlim[1] - xlim[0]
ydelta = ylim[1] - ylim[0]
if not inverse:
xout = xlim[0] + xin * xdelta
yout = ylim[0] + yin * ydelta
else:
xdelta2 = xin - xlim[0]
ydelta2 = yin - ylim[0]
xout = xdelta2 / xdelta
yout = ydelta2 / ydelta
return xout, yout
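# Illustrative check (hypothetical helper): with x limits (0, 10) and y limits (0, 100),
# axis fraction (0.5, 0.5) maps to the data point (5.0, 50.0) and inverse=True maps it back.
def _example_axis_transform():
    fig, ax = plt.subplots()
    ax.set_xlim(0, 10)
    ax.set_ylim(0, 100)
    forward = axis_data_coords_sys_transform(ax, 0.5, 0.5)
    backward = axis_data_coords_sys_transform(ax, 5.0, 50.0, inverse=True)
    plt.close(fig)
    assert forward == (5.0, 50.0)
    assert backward == (0.5, 0.5)
    return forward, backward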
def make_choropleth(data_csv, shpfile, two_digit_state_FIPS,
title='', footnote='', cat_name=None,
geoFIPS_col=None, geometry_col=None,
legx=.07, legy=0.18):
'''Args:
data_csv(str): normed path name to csv file containing data.
        1) Extension is ".csv"
        2) No leading rows or columns
        3) No footnotes, annotations, or comments
        4) Columns should be named ["FIPS",
        "category" for the population that fulfills the category
        requirement, "total" or None, any additional columns]
        5) The data set should have at least one category column or total column
shpfile(str): normed path name to shapefile
two_digit_state_FIPS(str or int): two digit state FIPS code,
title(str): title for map
footnote(str): footnote to put under the legend
geoFIPS_col(str): name of the FIPS column in the GeoDataFrame,
default is 'COUNTYFP'
geometry_col(str) : name of the geometry_col, default is "geometry",
legx(float): axis position for x of legend bounding box point
legy(float): axis position for y of legend bounding box point
'''
two_digit_state_FIPS = str(two_digit_state_FIPS).zfill(2)
data_csv = os.path.normpath(data_csv)
shpfile = os.path.normpath(shpfile)
data = | pd.read_csv(data_csv) | pandas.read_csv |
# Notebook to transform OSeMOSYS output to same format as EGEDA
# Import relevant packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
from openpyxl import Workbook
import xlsxwriter
import pandas.io.formats.excel
import glob
import re
# Path for OSeMOSYS output
path_output = './data/3_OSeMOSYS_output'
# Path for OSeMOSYS to EGEDA mapping
path_mapping = './data/2_Mapping_and_other'
# Where to save finalised dataframe
path_final = './data/4_Joined'
# OSeMOSYS results files
OSeMOSYS_filenames = glob.glob(path_output + "/*.xlsx")
# Reference filenames and net zero filenames
reference_filenames = list(filter(lambda k: 'reference' in k, OSeMOSYS_filenames))
netzero_filenames = list(filter(lambda y: 'net-zero' in y, OSeMOSYS_filenames))
# New 2018 data variable names
Mapping_sheets = list(pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = None).keys())[1:]
Mapping_file = pd.DataFrame()
for sheet in Mapping_sheets:
interim_map = pd.read_excel(path_mapping + '/OSeMOSYS_mapping_2021.xlsx', sheet_name = sheet, skiprows = 1)
Mapping_file = Mapping_file.append(interim_map).reset_index(drop = True)
# Moving everything from OSeMOSYS to EGEDA for TFC and TPES
Mapping_TFC_TPES = Mapping_file[Mapping_file['Balance'].isin(['TFC', 'TPES'])]
# And for transformation
Map_trans = Mapping_file[Mapping_file['Balance'] == 'TRANS'].reset_index(drop = True)
# A mapping just for i) power, ii) ref, own, sup and iii) hydrogen
Map_power = Map_trans[Map_trans['Sector'] == 'POW'].reset_index(drop = True)
Map_refownsup = Map_trans[Map_trans['Sector'].isin(['REF', 'SUP', 'OWN', 'HYD'])].reset_index(drop = True)
Map_hydrogen = Map_trans[Map_trans['Sector'] == 'HYD'].reset_index(drop = True)
# Define unique workbook and sheet combinations for TFC and TPES
Unique_TFC_TPES = Mapping_TFC_TPES.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
# Define unique workbook and sheet combinations for Transformation
Unique_trans = Map_trans.groupby(['Workbook', 'Sheet_energy']).size().reset_index().loc[:, ['Workbook', 'Sheet_energy']]
################################### TFC and TPES #############################################################
# Determine list of files to read based on the workbooks identified in the mapping file for REFERENCE scenario
ref_file_df = pd.DataFrame()
for i in range(len(Unique_TFC_TPES['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in reference_filenames if Unique_TFC_TPES['Workbook'].unique()[i] in entry],
'Workbook': Unique_TFC_TPES['Workbook'].unique()[i]})
ref_file_df = ref_file_df.append(_file)
ref_file_df = ref_file_df.merge(Unique_TFC_TPES, how = 'outer', on = 'Workbook')
# Determine list of files to read based on the workbooks identified in the mapping file for NET-ZERO scenario
netz_file_df = pd.DataFrame()
for i in range(len(Unique_TFC_TPES['Workbook'].unique())):
_file = pd.DataFrame({'File': [entry for entry in netzero_filenames if Unique_TFC_TPES['Workbook'].unique()[i] in entry],
'Workbook': Unique_TFC_TPES['Workbook'].unique()[i]})
netz_file_df = netz_file_df.append(_file)
netz_file_df = netz_file_df.merge(Unique_TFC_TPES, how = 'outer', on = 'Workbook')
# Create empty dataframe to store REFERENCE aggregated results
ref_aggregate_df1 = pd.DataFrame(columns = ['TECHNOLOGY', 'FUEL', 'REGION', 2050])
# Now read in the OSeMOSYS output files so that they're all in one data frame (ref_aggregate_df1)
if ref_file_df['File'].isna().any() == False:
for i in range(ref_file_df.shape[0]):
_df = pd.read_excel(ref_file_df.iloc[i, 0], sheet_name = ref_file_df.iloc[i, 2])
_df['Workbook'] = ref_file_df.iloc[i, 1]
_df['Sheet_energy'] = ref_file_df.iloc[i, 2]
ref_aggregate_df1 = ref_aggregate_df1.append(_df)
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['TIMESLICE'] != 'ONE']
interim_df2 = ref_aggregate_df1[ref_aggregate_df1['TIMESLICE'] == 'ONE']
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION', 'Workbook', 'Sheet_energy']).sum().reset_index()
ref_aggregate_df1 = interim_df2.append(interim_df1).reset_index(drop = True)
# bunkers draw downs and build. Need to change stock build to negative
interim_stock1 = ref_aggregate_df1[ref_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = ref_aggregate_df1[~ref_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
ref_aggregate_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
# Create empty dataframe to store NET ZERO aggregated results
netz_aggregate_df1 = pd.DataFrame(columns = ['TECHNOLOGY', 'FUEL', 'REGION', 2050])
# Now read in the OSeMOSYS output files so that they're all in one data frame (netz_aggregate_df1)
if netz_file_df['File'].isna().any() == False:
for i in range(netz_file_df.shape[0]):
_df = pd.read_excel(netz_file_df.iloc[i, 0], sheet_name = netz_file_df.iloc[i, 2])
_df['Workbook'] = netz_file_df.iloc[i, 1]
_df['Sheet_energy'] = netz_file_df.iloc[i, 2]
netz_aggregate_df1 = netz_aggregate_df1.append(_df)
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['TIMESLICE'] != 'ONE']
interim_df2 = netz_aggregate_df1[netz_aggregate_df1['TIMESLICE'] == 'ONE']
interim_df1 = interim_df1.groupby(['TECHNOLOGY', 'FUEL', 'REGION', 'Workbook', 'Sheet_energy']).sum().reset_index()
netz_aggregate_df1 = interim_df2.append(interim_df1).reset_index(drop = True)
# bunkers draw downs and build. Need to change stock build to negative
interim_stock1 = netz_aggregate_df1[netz_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()\
.set_index(['TECHNOLOGY', 'FUEL', 'REGION', 'TIMESLICE', 'Workbook', 'Sheet_energy'])
interim_stock2 = netz_aggregate_df1[~netz_aggregate_df1['TECHNOLOGY']\
.isin(['SUP_6_1_crude_oil_stock_build',
'SUP_8_1_natural_gas_stock_build',
'SUP_2_coal_products_stock_build'])].copy()
interim_stock1 = interim_stock1 * -1
interim_stock1 = interim_stock1.reset_index()
# Stitch back together
netz_aggregate_df1 = interim_stock2.append(interim_stock1).reset_index(drop = True)
# Now aggregate all the results for APEC
# REFERENCE
APEC_ref = ref_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
APEC_ref['REGION'] = 'APEC'
ref_aggregate_df1 = ref_aggregate_df1.append(APEC_ref).reset_index(drop = True)
# NET ZERO
APEC_netz = netz_aggregate_df1.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
APEC_netz['REGION'] = 'APEC'
netz_aggregate_df1 = netz_aggregate_df1.append(APEC_netz).reset_index(drop = True)
# Now aggregate results for 22_SEA
# Southeast Asia: 02, 07, 10, 15, 17, 19, 21
# REFERENCE
SEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
SEA_ref['REGION'] = '22_SEA'
ref_aggregate_df1 = ref_aggregate_df1.append(SEA_ref).reset_index(drop = True)
# NET ZERO
SEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['02_BD', '07_INA', '10_MAS', '15_RP', '17_SIN', '19_THA', '21_VN'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
SEA_netz['REGION'] = '22_SEA'
netz_aggregate_df1 = netz_aggregate_df1.append(SEA_netz).reset_index(drop = True)
# Aggregate results for 23_NEA
# Northeast Asia: 06, 08, 09, 18
# REFERENCE
NEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
NEA_ref['REGION'] = '23_NEA'
ref_aggregate_df1 = ref_aggregate_df1.append(NEA_ref).reset_index(drop = True)
# NET ZERO
NEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['06_HKC', '08_JPN', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
NEA_netz['REGION'] = '23_NEA'
netz_aggregate_df1 = netz_aggregate_df1.append(NEA_netz).reset_index(drop = True)
# Aggregate results for 23b_ONEA
# ONEA: 06, 09, 18
# REFERENCE
ONEA_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
ONEA_ref['REGION'] = '23b_ONEA'
ref_aggregate_df1 = ref_aggregate_df1.append(ONEA_ref).reset_index(drop = True)
# NET ZERO
ONEA_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['06_HKC', '09_ROK', '18_CT'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
ONEA_netz['REGION'] = '23b_ONEA'
netz_aggregate_df1 = netz_aggregate_df1.append(ONEA_netz).reset_index(drop = True)
# Aggregate results for 24_OAM
# OAM: 03, 04, 11, 14
# REFERENCE
OAM_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OAM_ref['REGION'] = '24_OAM'
ref_aggregate_df1 = ref_aggregate_df1.append(OAM_ref).reset_index(drop = True)
# NET ZERO
OAM_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['03_CDA', '04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OAM_netz['REGION'] = '24_OAM'
netz_aggregate_df1 = netz_aggregate_df1.append(OAM_netz).reset_index(drop = True)
# Aggregate results for 24b_OOAM
# OOAM: 04, 11, 14
# REFERENCE
OOAM_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OOAM_ref['REGION'] = '24b_OOAM'
ref_aggregate_df1 = ref_aggregate_df1.append(OOAM_ref).reset_index(drop = True)
# NET ZERO
OOAM_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['04_CHL', '11_MEX', '14_PE'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OOAM_netz['REGION'] = '24b_OOAM'
netz_aggregate_df1 = netz_aggregate_df1.append(OOAM_netz).reset_index(drop = True)
# Aggregate results for 25_OCE
# Oceania: 01, 12, 13
# REFERENCE
OCE_ref = ref_aggregate_df1[ref_aggregate_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OCE_ref['REGION'] = '25_OCE'
ref_aggregate_df1 = ref_aggregate_df1.append(OCE_ref).reset_index(drop = True)
# NET ZERO
OCE_netz = netz_aggregate_df1[netz_aggregate_df1['REGION']\
.isin(['01_AUS', '12_NZ', '13_PNG'])]\
.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
OCE_netz['REGION'] = '25_OCE'
netz_aggregate_df1 = netz_aggregate_df1.append(OCE_netz).reset_index(drop = True)
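# Illustrative refactor sketch (hypothetical helper, not used by the code below): the
# regional aggregation blocks above all repeat the same filter/groupby/append pattern,
# which could be expressed once; '.append' is kept to match the pandas usage of this notebook.
def aggregate_region(results_df, member_economies, region_name):
    if member_economies is None:   # e.g. APEC aggregates every economy
        regional = results_df.groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
    else:
        regional = results_df[results_df['REGION'].isin(member_economies)]\
            .groupby(['TECHNOLOGY', 'FUEL']).sum().reset_index()
    regional['REGION'] = region_name
    return results_df.append(regional).reset_index(drop = True)
# e.g. ref_aggregate_df1 = aggregate_region(ref_aggregate_df1, ['01_AUS', '12_NZ', '13_PNG'], '25_OCE')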
# Get maximum REFERENCE year column to build data frame below
ref_year_columns = []
for item in list(ref_aggregate_df1.columns):
try:
ref_year_columns.append(int(item))
except ValueError:
pass
max_year_ref = max(ref_year_columns)
OSeMOSYS_years_ref = list(range(2017, max_year_ref + 1))
# Get maximum NET ZERO year column to build data frame below
netz_year_columns = []
for item in list(netz_aggregate_df1.columns):
try:
netz_year_columns.append(int(item))
except ValueError:
pass
max_year_netz = max(netz_year_columns)
OSeMOSYS_years_netz = list(range(2017, max_year_netz + 1))
#################################################################################################
### ADJUNCT; LAST MINUTE GRAB of LNG/PIPELINE imports and exports which are only from OSeMOSYS
# This script is a bit messy as there are two chunks that have ref_aggregate_df1
# Building the grab here as it grabs from the first ref_aggregate_df1 which is more comprehensive
# i.e. it has region aggregates such as OOAM, OCE and APEC in addition to economies
ref_lngpipe_1 = ref_aggregate_df1[ref_aggregate_df1['TECHNOLOGY'].isin(['SUP_8_1_natural_gas_import',\
'SUP_8_2_lng_import', 'SUP_8_1_natural_gas_export', 'SUP_8_2_lng_export'])].copy()\
.loc[:, ['REGION', 'TECHNOLOGY'] + OSeMOSYS_years_ref].reset_index(drop = True)
ref_lngpipe_1.to_csv(path_final + '/lngpipe_reference.csv', index = False)
netz_lngpipe_1 = netz_aggregate_df1[netz_aggregate_df1['TECHNOLOGY'].isin(['SUP_8_1_natural_gas_import',\
'SUP_8_2_lng_import', 'SUP_8_1_natural_gas_export', 'SUP_8_2_lng_export'])].copy()\
.loc[:, ['REGION', 'TECHNOLOGY'] + OSeMOSYS_years_netz].reset_index(drop = True)
netz_lngpipe_1.to_csv(path_final + '/lngpipe_netzero.csv', index = False)
###################################################################################################
########################## fuel_code aggregations ##########################
# First level
coal_fuels = ['1_1_coking_coal', '1_5_lignite', '1_x_coal_thermal']
oil_fuels = ['6_1_crude_oil', '6_x_ngls']
petrol_fuels = ['7_1_motor_gasoline', '7_2_aviation_gasoline', '7_3_naphtha', '7_x_jet_fuel', '7_6_kerosene', '7_7_gas_diesel_oil',
'7_8_fuel_oil', '7_9_lpg', '7_10_refinery_gas_not_liquefied', '7_11_ethane', '7_x_other_petroleum_products']
gas_fuels = ['8_1_natural_gas', '8_2_lng', '8_3_gas_works_gas']
biomass_fuels = ['15_1_fuelwood_and_woodwaste', '15_2_bagasse', '15_3_charcoal', '15_4_black_liquor', '15_5_other_biomass']
other_fuels = ['16_1_biogas', '16_2_industrial_waste', '16_3_municipal_solid_waste_renewable', '16_4_municipal_solid_waste_nonrenewable', '16_5_biogasoline', '16_6_biodiesel',
'16_7_bio_jet_kerosene', '16_8_other_liquid_biofuels', '16_9_other_sources', '16_x_hydrogen']
# Total
total_fuels = ['1_coal', '2_coal_products', '5_oil_shale_and_oil_sands', '6_crude_oil_and_ngl', '7_petroleum_products', '8_gas', '9_nuclear', '10_hydro', '11_geothermal',
'12_solar', '13_tide_wave_ocean', '14_wind', '15_solid_biomass', '16_others', '17_electricity', '18_heat']
# total_renewables to be completed
##############################################################################
# item_code_new aggregations
# Lowest level
industry_agg = ['14_1_iron_and_steel', '14_2_chemical_incl_petrochemical', '14_3_non_ferrous_metals', '14_4_nonmetallic_mineral_products', '14_5_transportation_equipment',
'14_6_machinery', '14_7_mining_and_quarrying', '14_8_food_beverages_and_tobacco', '14_9_pulp_paper_and_printing', '14_10_wood_and_wood_products',
'14_11_construction', '14_12_textiles_and_leather', '14_13_nonspecified_industry']
transport_agg = ['15_1_domestic_air_transport', '15_2_road', '15_3_rail', '15_4_domestic_navigation', '15_5_pipeline_transport', '15_6_nonspecified_transport']
others_agg = ['16_1_commercial_and_public_services', '16_2_residential', '16_3_agriculture', '16_4_fishing', '16_5_nonspecified_others']
# Then first level
tpes_agg = ['1_indigenous_production', '2_imports', '3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers', '6_stock_change']
tfc_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector', '17_nonenergy_use']
tfec_agg = ['14_industry_sector', '15_transport_sector', '16_other_sector']
# For dataframe finalising
key_variables = ['economy', 'fuel_code', 'item_code_new']
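# Illustrative helper sketch (hypothetical, not used by the scenario loops below): the
# fuel_code and item_code_new roll-ups both repeat the same filter/groupby/relabel steps,
# which could be written once like this.
def roll_up(df, filter_col, members, group_col, new_label):
    total = df[df[filter_col].isin(members)].groupby([group_col])\
        .sum().assign(**{filter_col: new_label}).reset_index()
    return df.append(total).reset_index(drop = True)
# e.g. with_coal_total = roll_up(interim_df2, 'fuel_code', coal_fuels, 'item_code_new', '1_coal')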
#######################################################################################################################
# REFERENCE
# Now aggregate data based on the mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
ref_aggregate_df2 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in ref_aggregate_df1['REGION'].unique():
interim_df1 = ref_aggregate_df1[ref_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Mapping_TFC_TPES, how = 'left', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['item_code_new', 'fuel_code']).sum().reset_index()
# Change export data to negative values
exports_bunkers = interim_df1[interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]\
.set_index(['item_code_new', 'fuel_code'])
everything_else = interim_df1[~interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]
exports_bunkers = exports_bunkers * -1
exports_bunkers = exports_bunkers.reset_index()
interim_df2 = everything_else.append(exports_bunkers)
########################### Aggregate fuel_code for new variables ###################################
# First level fuels
coal = interim_df2[interim_df2['fuel_code'].isin(coal_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '1_coal').reset_index()
oil = interim_df2[interim_df2['fuel_code'].isin(oil_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '6_crude_oil_and_ngl').reset_index()
petrol = interim_df2[interim_df2['fuel_code'].isin(petrol_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_petroleum_products').reset_index()
gas = interim_df2[interim_df2['fuel_code'].isin(gas_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '8_gas').reset_index()
biomass = interim_df2[interim_df2['fuel_code'].isin(biomass_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '15_solid_biomass').reset_index()
others = interim_df2[interim_df2['fuel_code'].isin(other_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '16_others').reset_index()
interim_df3 = interim_df2.append([coal, oil, petrol, gas, biomass, others]).reset_index(drop = True)
# And total fuels
total_f = interim_df3[interim_df3['fuel_code'].isin(total_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '19_total').reset_index()
interim_df4 = interim_df3.append(total_f).reset_index(drop = True)
################################ And now item_code_new ######################################
# Start with lowest level
industry = interim_df4[interim_df4['item_code_new'].isin(industry_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '14_industry_sector').reset_index()
transport = interim_df4[interim_df4['item_code_new'].isin(transport_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '15_transport_sector').reset_index()
bld_ag_other = interim_df4[interim_df4['item_code_new'].isin(others_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '16_other_sector').reset_index()
interim_df5 = interim_df4.append([industry, transport, bld_ag_other]).reset_index(drop = True)
# Now higher level agg
#Might need to check this depending on whether exports is negative
tpes = interim_df5[interim_df5['item_code_new'].isin(tpes_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '7_total_primary_energy_supply').reset_index()
tfc = interim_df5[interim_df5['item_code_new'].isin(tfc_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '12_total_final_consumption').reset_index()
tfec = interim_df5[interim_df5['item_code_new'].isin(tfec_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '13_total_final_energy_consumption').reset_index()
interim_df6 = interim_df5.append([tpes, tfc, tfec]).reset_index(drop = True)
# Now add in economy reference
interim_df6['economy'] = region
# Now append economy dataframe to communal data frame
ref_aggregate_df2 = ref_aggregate_df2.append(interim_df6)
# aggregate_df2 = aggregate_df2[['economy', 'fuel_code', 'item_code_new'] + OSeMOSYS_years]
if ref_aggregate_df2.empty:
ref_aggregate_df2
else:
ref_aggregate_df2 = ref_aggregate_df2.loc[:, key_variables + OSeMOSYS_years_ref]
#######################################################################################################################
# NET ZERO
# Now aggregate data based on the mapping
# That is group by REGION, TECHNOLOGY and FUEL
# First create empty dataframe
netz_aggregate_df2 = pd.DataFrame()
# Then loop through based on different regions/economies and stitch back together
for region in netz_aggregate_df1['REGION'].unique():
interim_df1 = netz_aggregate_df1[netz_aggregate_df1['REGION'] == region]
interim_df1 = interim_df1.merge(Mapping_TFC_TPES, how = 'left', on = ['TECHNOLOGY', 'FUEL'])
interim_df1 = interim_df1.groupby(['item_code_new', 'fuel_code']).sum().reset_index()
# Change export data to negative values
exports_bunkers = interim_df1[interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]\
.set_index(['item_code_new', 'fuel_code'])
everything_else = interim_df1[~interim_df1['item_code_new'].isin(['3_exports', '4_international_marine_bunkers', '5_international_aviation_bunkers'])]
exports_bunkers = exports_bunkers * -1
exports_bunkers = exports_bunkers.reset_index()
interim_df2 = everything_else.append(exports_bunkers)
########################### Aggregate fuel_code for new variables ###################################
# First level fuels
coal = interim_df2[interim_df2['fuel_code'].isin(coal_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '1_coal').reset_index()
oil = interim_df2[interim_df2['fuel_code'].isin(oil_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '6_crude_oil_and_ngl').reset_index()
petrol = interim_df2[interim_df2['fuel_code'].isin(petrol_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '7_petroleum_products').reset_index()
gas = interim_df2[interim_df2['fuel_code'].isin(gas_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '8_gas').reset_index()
biomass = interim_df2[interim_df2['fuel_code'].isin(biomass_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '15_solid_biomass').reset_index()
others = interim_df2[interim_df2['fuel_code'].isin(other_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '16_others').reset_index()
interim_df3 = interim_df2.append([coal, oil, petrol, gas, biomass, others]).reset_index(drop = True)
# And total fuels
total_f = interim_df3[interim_df3['fuel_code'].isin(total_fuels)].groupby(['item_code_new'])\
.sum().assign(fuel_code = '19_total').reset_index()
interim_df4 = interim_df3.append(total_f).reset_index(drop = True)
################################ And now item_code_new ######################################
# Start with lowest level
industry = interim_df4[interim_df4['item_code_new'].isin(industry_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '14_industry_sector').reset_index()
transport = interim_df4[interim_df4['item_code_new'].isin(transport_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '15_transport_sector').reset_index()
bld_ag_other = interim_df4[interim_df4['item_code_new'].isin(others_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '16_other_sector').reset_index()
interim_df5 = interim_df4.append([industry, transport, bld_ag_other]).reset_index(drop = True)
# Now higher level agg
#Might need to check this depending on whether exports is negative
tpes = interim_df5[interim_df5['item_code_new'].isin(tpes_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '7_total_primary_energy_supply').reset_index()
tfc = interim_df5[interim_df5['item_code_new'].isin(tfc_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '12_total_final_consumption').reset_index()
tfec = interim_df5[interim_df5['item_code_new'].isin(tfec_agg)].groupby(['fuel_code'])\
.sum().assign(item_code_new = '13_total_final_energy_consumption').reset_index()
interim_df6 = interim_df5.append([tpes, tfc, tfec]).reset_index(drop = True)
# Now add in economy reference
interim_df6['economy'] = region
# Now append economy dataframe to communal data frame
netz_aggregate_df2 = netz_aggregate_df2.append(interim_df6)
# aggregate_df2 = aggregate_df2[['economy', 'fuel_code', 'item_code_new'] + OSeMOSYS_years]
if netz_aggregate_df2.empty == True:
netz_aggregate_df2
else:
netz_aggregate_df2 = netz_aggregate_df2.loc[:, key_variables + OSeMOSYS_years_netz]
# Now load the EGEDA_years data frame
EGEDA_years = pd.read_csv('./data/1_EGEDA/EGEDA_2018_years.csv')
# REFERENCE
if ref_aggregate_df2.empty == True:
ref_aggregate_df2_tojoin = ref_aggregate_df2.copy()
else:
ref_aggregate_df2_tojoin = ref_aggregate_df2.copy().loc[:, key_variables + OSeMOSYS_years_ref]
# NET ZERO
if netz_aggregate_df2.empty == True:
netz_aggregate_df2_tojoin = netz_aggregate_df2.copy()
else:
netz_aggregate_df2_tojoin = netz_aggregate_df2.copy().loc[:, key_variables + OSeMOSYS_years_netz]
# Join EGEDA historical to OSeMOSYS results (line below removes 2017 and 2018 from historical)
# REFERENCE
if ref_aggregate_df2_tojoin.empty == True:
Joined_ref_df = EGEDA_years.copy().reindex(columns = EGEDA_years.columns.tolist() + list(range(2019, 2051)))
else:
Joined_ref_df = EGEDA_years.copy().iloc[:, :-2].merge(ref_aggregate_df2_tojoin, on = ['economy', 'fuel_code', 'item_code_new'], how = 'left')
Joined_ref_df.to_csv(path_final + '/OSeMOSYS_to_EGEDA_2018_reference.csv', index = False)
# NET ZERO
if netz_aggregate_df2_tojoin.empty == True:
Joined_netz_df = EGEDA_years.copy().reindex(columns = EGEDA_years.columns.tolist() + list(range(2019, 2051)))
else:
Joined_netz_df = EGEDA_years.copy().iloc[:, :-2].merge(netz_aggregate_df2_tojoin, on = ['economy', 'fuel_code', 'item_code_new'], how = 'left')
Joined_netz_df.to_csv(path_final + '/OSeMOSYS_to_EGEDA_2018_netzero.csv', index = False)
###############################################################################################################################
# Moving beyond TFC and TPES and Transformation
# Determine list of files to read based on the workbooks identified in the mapping file
# REFERENCE
ref_file_trans = | pd.DataFrame() | pandas.DataFrame |
import datetime
import os
import pickle
import urllib.parse
import urllib.request as request
from collections import Counter
from contextlib import closing
from datetime import timedelta
from pathlib import Path
import numpy as np
import pandas as pd
import tika
import wget
from dateutil import parser
os.environ['TIKA_SERVER_JAR'] = 'https://repo1.maven.org/maven2/org/apache/tika/tika-server/'+tika.__version__+'/tika-server-'+tika.__version__+'.jar'
from tika import parser
from cadmus.retrieval.search_terms_to_pmid_list import search_terms_to_pmid_list
from cadmus.pre_retrieval.pmids_to_medline_file import pmids_to_medline_file
from cadmus.pre_retrieval.creation_retrieved_df import creation_retrieved_df
from cadmus.pre_retrieval.ncbi_id_converter_batch import ncbi_id_converter_batch
from cadmus.retrieval.HTTP_setup import HTTP_setup
from cadmus.pre_retrieval.get_crossref_links_and_licenses import get_crossref_links_and_licenses
from cadmus.main.retrieval import retrieval
from cadmus.retrieval.parse_link_retrieval import parse_link_retrieval
from cadmus.pre_retrieval.check_for_retrieved_df import check_for_retrieved_df
from cadmus.retrieval.clear import clear
from cadmus.post_retrieval.content_text import content_text
from cadmus.post_retrieval.evaluation import evaluation
from cadmus.post_retrieval.correct_date_format import correct_date_format
from cadmus.post_retrieval.clean_up_dir import clean_up_dir
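# Illustrative call of the bioscraping() entry point defined below (the query string,
# email address and keys are placeholders, not real credentials):
# bioscraping('"machine learning"[Title/Abstract] AND "genomics"[Title/Abstract]',
#             'researcher@example.org', 'MY_NCBI_API_KEY', 'MY_CLICKTHROUGH_API_KEY',
#             full_search='light')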
def bioscraping(input_function, email, api_key, click_through_api_key, start = None, idx = None, full_search = None, keep_abstract = True):
# first bioscraping checks whether this is an update of a previous search or a new search.
# create all the output directories if they do not already exist
update = check_for_retrieved_df()
if update:
print('There is already a Retrieved Dataframe, we shall add new results to this existing dataframe, excluding duplicates.')
# load the original df to use downstream.
original_df = pickle.load(open('./output/retrieved_df/retrieved_df2.p', 'rb'))
        # bioscraping needs to extract all the pmids where we already have the content_text
        # these pmids will then be removed from the search df according to the parameter used for 'full_search'
original_pmids = []
drop_lines = []
# loop through all rows checking the criteria according to 'full_search'
if full_search == None:
# We are not updating the previous search(es) of the DataFrame, only looking for new lines
print('We are not updating the previous search(es) of the DataFrame, only looking for new lines')
original_pmids = (np.array(original_df.pmid))
if full_search == 'light':
# We are doing a light search, from the previous search we are only going to take a look at the missing content_text
print('We are doing a light search, from the previous search we are only going to take a look at the missing content_text')
for index, row in original_df.iterrows():
                # checking what is present in the content_text field from the previous search; if it is not a full text, we want to try again
                # (the self-inequality comparison catches NaN values)
                if row.content_text == '' or row.content_text is None or row.content_text != row.content_text or row.content_text[:4] == 'ABS:':
# keeping the pmid to replace the lines with the new line from this process to avoid duplicates
drop_lines.append(index)
else:
# removing these pmids from the search
original_pmids.append(row['pmid'])
if full_search == 'heavy':
            # We are doing a heavy search, trying to find new tagged versions and pdf versions from the previous search
            print('We are doing a heavy search, trying to find new tagged versions from the previous search')
for index, row in original_df.iterrows():
# Looking if we have at least one tagged format with a pdf format
if (row['pdf'] == 1 and row['html'] == 1) or (row['pdf'] == 1 and row['xml'] == 1):
# removing these pmids from the search
original_pmids.append(row['pmid'])
else:
# keeping the pmid to replace the lines with the new line from this process to avoid duplicates
drop_lines.append(index)
else:
# check if a start position is given (this would suggest restarting a failed program)
if start != None:
pass
else:
print('This is a new project, creating all directories')
# search strings and pmid lists have the same basic pipeline +/- the search at the start
# checking the input type
# in principle, this step could be augmented to use DOIs or PMCIDs but this has not been implemented yet
if type(input_function) == str or input_function[0].isdigit() == True:
        print('This looks like a search string or list of pmids. \nIf this is not correct, please stop now')
if input_function == '':
print('You did not enter any search term')
else:
# run the search if the input is a string
if type(input_function) == str:
                # This is the NCBI e-search step (PubMed API) when a query string is provided, resulting in a list of pmids within a dictionary
results_d = search_terms_to_pmid_list(input_function, email, api_key)
else:
# if the input is a list of pmids we just need to make a results_d to maintain the output variables
# get todays date
date = datetime.datetime.today()
date = f'{date.year}_{date.month}_{date.day}_{date.hour}_{date.minute}'
# construct the output dict
results_d = {'date':date, 'search_term':'', 'total_count':len(input_function), 'pmids':input_function}
# save the output dictionary for our records of what terms used and number of records returned for a given date.
pickle.dump(results_d, open(f'./output/esearch_results/{date}.p', 'wb'))
# at this stage we need to check if the search is a new search or update of previous list.
if update:
# when this is an update we need to remove the previously used pmids from our current pipeline (the orignal df and new df will be merged at the end)
current_pmids = results_d.get('pmids')
# use set difference to get the new pmids only
new_pmids = list(set(current_pmids).difference(set(original_pmids)))
if len(new_pmids) == 0:
print('There are no new lines since your previous search - stop the function.')
exit()
else:
print(f'There are {len(new_pmids)} new results since last run.')
# set the new pmids into the results d for the next step
results_d.update({'pmids':new_pmids})
else:
# this project is new, no need to subset the pmids
pass
if idx != None and start == None:
print(f"You can't have your parameter idx not equal to None when start = None, changing your idx to None")
idx = None
    # starting bioscraping from somewhere else than the beginning, most likely due to a previous crash of the function
if start != None:
try:
            # loading the 'moving' df to restart from where we stopped
retrieved_df = pickle.load(open(f'./output/retrieved_df/retrieved_df.p','rb'))
if update:
# subset the df to keep only the new line
retrieved_df = retrieved_df[retrieved_df.pmid.isin(new_pmids)]
except:
print(f"You don't have any previous retrieved_df we changed your parameters start and idx to None")
start = None
idx = None
if start == None:
# make a medline records text file for a given list of pmids
medline_file_name = pmids_to_medline_file(results_d['date'], results_d['pmids'], email, api_key)
# parse the medline file and create a retrieved_df with unique indexes for each record
retrieved_df = pd.DataFrame(creation_retrieved_df(medline_file_name))
# standardise the empty values and ensure there are no duplicates of pmids or dois in our retrieved_df
retrieved_df.fillna(value=np.nan, inplace=True)
retrieved_df = retrieved_df.drop_duplicates(keep='first', ignore_index=False, subset=['doi', 'pmid'])
# use the NCBI id converter API to get any missing IDs known to the NCBI databases
retrieved_df = ncbi_id_converter_batch(retrieved_df, email)
# we now have a retrieved_df of metadata.
# We can use the previous retrieved_df index to exclude ones we have looked for already.
# set up the crossref metadata http request ('base')
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'base')
#create a new column to note whether there is a crossref metadata record available - default - 0 (NO).
retrieved_df['crossref'] = 0
        # we're going to start collecting full text links now, so let's make a new column on the retrieved_df to hold a dictionary of links
retrieved_df['full_text_links'] = [{'cr_tdm':[],'html_parse':[], 'pubmed_links':[]} for value in retrieved_df.index]
retrieved_df['licenses'] = [{} for val in retrieved_df.index]
# work through the retrieved_df for every available doi and query crossref for full text links
retrieved_df = get_crossref_links_and_licenses(retrieved_df, http, base_url, headers)
# now time to download some fulltexts, will need to create some new columns to show success or failure for each format
# we'll also make some dictionaries to hold the parsed data and raw file details
retrieved_df['pdf'] = 0
retrieved_df['xml'] = 0
retrieved_df['html'] = 0
retrieved_df['plain'] = 0
retrieved_df['pmc_tgz'] = 0
retrieved_df['xml_parse_d'] = [{} for index in retrieved_df.index]
retrieved_df['html_parse_d'] = [{} for index in retrieved_df.index]
retrieved_df['pdf_parse_d'] = [{} for index in retrieved_df.index]
retrieved_df['plain_parse_d'] = [{} for index in retrieved_df.index]
pickle.dump(retrieved_df, open(f'./output/retrieved_df/retrieved_df.p', 'wb'))
else:
pass
# set up the http session for crossref requests
# http is the session object
# base URL is empty in this case
# headers include the clickthrough api key and email address
    # this project is not triggered by a save
if start == None and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'crossref', keep_abstract)
    # We skip all the previous steps to start at the crossref step
elif start == 'crossref' and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'crossref', keep_abstract)
start = None
#we run the code only on crossref
elif start == 'crossref_only':
try:
# we load the previous result to re-run a step
retrieved_df2 = pickle.load(open(f'./output/retrieved_df/retrieved_df2.p', 'rb'))
if update:
                # if in update mode, keep only the rows we are interested in
retrieved_df2 = retrieved_df2[retrieved_df2.pmid.isin(new_pmids)]
except:
retrieved_df2 = retrieved_df
if idx != None:
try:
# restart from the last index it was saved at
divide_at = retrieved_df2.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
                # rows that have not been done yet
                finish = retrieved_df2[divide_at:]
                # rows that have already been done
                done = retrieved_df2[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'crossref', keep_abstract, done = done)
retrieved_df2 = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
retrieved_df2 = retrieval(retrieved_df2, http, base_url, headers, 'crossref', keep_abstract)
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
retrieved_df2 = retrieval(retrieved_df2, http, base_url, headers, 'crossref', keep_abstract)
    # we start at the crossref step and at a specific index, could be related to a previous failed attempt
elif start == 'crossref' and idx != None:
try:
divide_at = retrieved_df.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
finish = retrieved_df[divide_at:]
done = retrieved_df[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'crossref', keep_abstract, done = done)
retrieved_df = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
            # change start and idx to None so the remaining steps run over all the rows
start = None
idx = None
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'crossref')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'crossref', keep_abstract)
start = None
idx = None
else:
pass
    # After crossref, we move on to doi.org - this uses the doi provided and redirection to see if we land on the full text html page
if start == None and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'doiorg', keep_abstract)
elif start == 'doiorg' and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'doiorg', keep_abstract)
start = None
elif start == 'doiorg_only':
try:
retrieved_df2 = pickle.load(open(f'./output/retrieved_df/retrieved_df2.p', 'rb'))
if update:
retrieved_df2 = retrieved_df2[retrieved_df2.pmid.isin(new_pmids)]
except:
retrieved_df2 = retrieved_df
if idx != None:
try:
divide_at = retrieved_df2.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
finish = retrieved_df2[divide_at:]
done = retrieved_df2[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'doiorg', keep_abstract, done = done)
retrieved_df2 = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
retrieved_df2 = retrieval(retrieved_df2, http, base_url, headers, 'doiorg', keep_abstract)
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
retrieved_df2 = retrieval(retrieved_df2, http, base_url, headers, 'doiorg', keep_abstract)
elif start == 'doiorg' and idx != None:
try:
divide_at = retrieved_df.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
finish = retrieved_df[divide_at:]
done = retrieved_df[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'doiorg', keep_abstract, done = done)
retrieved_df = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
start = None
idx = None
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'doiorg')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'doiorg', keep_abstract)
start = None
idx = None
else:
pass
    # we continue by sending requests to Europe PMC, looking for the xml format
if start == None and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'epmcxml', keep_abstract)
elif start == 'epmcxml' and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'epmcxml', keep_abstract)
start = None
elif start == 'epmcxml_only':
try:
retrieved_df2 = pickle.load(open(f'./output/retrieved_df/retrieved_df2.p', 'rb'))
if update:
retrieved_df2 = retrieved_df2[retrieved_df2.pmid.isin(new_pmids)]
except:
retrieved_df2 = retrieved_df
if idx != None:
try:
divide_at = retrieved_df2.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
finish = retrieved_df2[divide_at:]
done = retrieved_df2[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'epmcxml', keep_abstract, done = done)
retrieved_df2 = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
retrieved_df2 = retrieval(retrieved_df2, http, base_url, headers, 'epmcxml', keep_abstract)
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
retrieved_df2 = retrieval(retrieved_df2, http, base_url, headers, 'epmcxml', keep_abstract)
elif start == 'epmcxml' and idx != None:
try:
divide_at = retrieved_df.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
finish = retrieved_df[divide_at:]
done = retrieved_df[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'epmcxml', keep_abstract, done = done)
retrieved_df = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
start = None
idx = None
else:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'epmcxml')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'epmcxml', keep_abstract)
start = None
idx = None
else:
pass
#pmc, xml format
if start == None and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'pmcxmls')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'pmcxmls', keep_abstract)
elif start == 'pmcxmls' and idx == None:
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'pmcxmls')
# now use the http request set up to request for each of the retrieved_df
retrieved_df = retrieval(retrieved_df, http, base_url, headers, 'pmcxmls', keep_abstract)
start = None
elif start == 'pmcxmls_only':
try:
retrieved_df2 = pickle.load(open(f'./output/retrieved_df/retrieved_df2.p', 'rb'))
if update:
retrieved_df2 = retrieved_df2[retrieved_df2.pmid.isin(new_pmids)]
except:
retrieved_df2 = retrieved_df
if idx != None:
try:
divide_at = retrieved_df2.index.get_loc(idx)
except:
print(f"The idx you enter was not found in the retrieved_df, please enter a correct index")
exit()
if divide_at != 0:
finish = retrieved_df2[divide_at:]
done = retrieved_df2[:divide_at]
http, base_url, headers = HTTP_setup(email, click_through_api_key, 'pmcxmls')
# now use the http request set up to request for each of the retrieved_df
finish = retrieval(finish, http, base_url, headers, 'pmcxmls', keep_abstract, done = done)
retrieved_df2 = pd.concat([done, finish], axis=0, join='outer', ignore_index=False, copy=True)
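# The branches above all follow the same resume pattern: locate the row label
# where a previous run stopped, split the DataFrame there, re-run the per-row
# retrieval only on the unfinished part, and stitch the two pieces back
# together. A minimal, self-contained sketch of that pattern is shown below;
# `process` is a hypothetical stand-in for the retrieval(...) call and is not
# part of the original script.
import pandas as pd

def resume_from_index(df: pd.DataFrame, idx, process):
    """Re-run `process` on the rows of `df` starting at label `idx`."""
    try:
        divide_at = df.index.get_loc(idx)
    except KeyError:
        raise ValueError(f"Index {idx!r} was not found in the DataFrame")
    done = df.iloc[:divide_at]             # rows handled in a previous run
    finish = process(df.iloc[divide_at:])  # rows that still need processing
    # Recombine while preserving the original row order and index labels
    return pd.concat([done, finish], axis=0, ignore_index=False)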
import json
from os import listdir
import pandas as pd
import multiprocessing as mp
THRESHOLD=0.82
DIR='/mnt/ceph/storage/data-in-progress/data-research/web-search/SIGIR-21/sigir21-deduplicate-trec-run-files/'
def analyze_jsonl_line(line):
dedup_data = json.loads(line)
docs_to_remove = []
for sim in dedup_data['similarities']:
if sim['similarities']['s3'] >= THRESHOLD:
docs_to_remove += [sim['secondId']]
return {
'topic': dedup_data['topic'],
'duplicates': len(set(docs_to_remove)),
'docs': dedup_data['docs'],
}
def analyze_all_jsonl_lines(file_name):
with open(file_name) as f:
print('Process ' + file_name)
return mp.Pool(50).map(analyze_jsonl_line, [i for i in f])
def analyze_all_runs(run_files):
rows = []
for r in run_files:
rows += [i for i in analyze_all_jsonl_lines(r)]
return pd.DataFrame(rows)
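# Illustrative usage sketch (not part of the original script): run the
# analysis over every deduplication file and report the topics with the most
# near-duplicates. The *.jsonl suffix used to pick the run files under DIR is
# an assumption for the example.
if __name__ == '__main__':
    run_files = [DIR + f for f in listdir(DIR) if f.endswith('.jsonl')]
    df = analyze_all_runs(run_files)
    # Average number of documents flagged as near-duplicates per topic
    per_topic = df.groupby('topic')['duplicates'].mean()
    print(per_topic.sort_values(ascending=False).head(10))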
import unittest
import pandas as pd
import numpy as np
from scipy.sparse.csr import csr_matrix
from string_grouper.string_grouper import DEFAULT_MIN_SIMILARITY, \
DEFAULT_REGEX, DEFAULT_NGRAM_SIZE, DEFAULT_N_PROCESSES, DEFAULT_IGNORE_CASE, \
StringGrouperConfig, StringGrouper, StringGrouperNotFitException, \
match_most_similar, group_similar_strings, match_strings, \
compute_pairwise_similarities
from unittest.mock import patch, Mock
def mock_symmetrize_matrix(x: csr_matrix) -> csr_matrix:
return x
class SimpleExample(object):
def __init__(self):
self.customers_df = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address4', '', 'Description4', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address5', 'Tel5', 'Description5', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.customers_df2 = pd.DataFrame(
[
('BB016741P', 'Mega Enterprises Corporation', 'Address0', 'Tel0', 'Description0', 0.2),
('CC082744L', 'Hyper Startup Incorporated', '', 'Tel1', '', 0.5),
('AA098762D', 'Hyper Startup Inc.', 'Address2', 'Tel2', 'Description2', 0.3),
('BB099931J', 'Hyper-Startup Inc.', 'Address3', 'Tel3', 'Description3', 0.1),
('DD012339M', 'HyperStartup Inc.', 'Address4', 'Tel4', 'Description4', 0.1),
('HH072982K', 'Hyper Hyper Inc.', 'Address5', '', 'Description5', 0.9),
('EE059082Q', 'Mega Enterprises Corp.', 'Address6', 'Tel6', 'Description6', 1.0)
],
columns=('Customer ID', 'Customer Name', 'Address', 'Tel', 'Description', 'weight')
)
self.a_few_strings = pd.Series(['BB016741P', 'BB082744L', 'BB098762D', 'BB099931J', 'BB072982K', 'BB059082Q'])
self.one_string = pd.Series(['BB0'])
self.two_strings = pd.Series(['Hyper', 'Hyp'])
self.whatever_series_1 = pd.Series(['whatever'])
self.expected_result_with_zeroes = pd.DataFrame(
[
(1, 'Hyper Startup Incorporated', 0.08170638, 'whatever', 0),
(0, 'Mega Enterprises Corporation', 0., 'whatever', 0),
(2, 'Hyper Startup Inc.', 0., 'whatever', 0),
(3, 'Hyper-Startup Inc.', 0., 'whatever', 0),
(4, 'Hyper Hyper Inc.', 0., 'whatever', 0),
(5, 'Mega Enterprises Corp.', 0., 'whatever', 0)
],
columns=['left_index', 'left_Customer Name', 'similarity', 'right_side', 'right_index']
)
self.expected_result_centroid = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Startup Inc.',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
self.expected_result_centroid_with_index_col = pd.DataFrame(
[
(0, 'Mega Enterprises Corporation'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(2, 'Hyper Startup Inc.'),
(4, 'Hyper Hyper Inc.'),
(0, 'Mega Enterprises Corporation')
],
columns=['group_rep_index', 'group_rep_Customer Name']
)
self.expected_result_first = pd.Series(
[
'Mega Enterprises Corporation',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Startup Incorporated',
'Hyper Hyper Inc.',
'Mega Enterprises Corporation'
],
name='group_rep_Customer Name'
)
class StringGrouperConfigTest(unittest.TestCase):
def test_config_defaults(self):
"""Empty initialisation should set default values"""
config = StringGrouperConfig()
self.assertEqual(config.min_similarity, DEFAULT_MIN_SIMILARITY)
self.assertEqual(config.max_n_matches, None)
self.assertEqual(config.regex, DEFAULT_REGEX)
self.assertEqual(config.ngram_size, DEFAULT_NGRAM_SIZE)
self.assertEqual(config.number_of_processes, DEFAULT_N_PROCESSES)
self.assertEqual(config.ignore_case, DEFAULT_IGNORE_CASE)
def test_config_immutable(self):
"""Configurations should be immutable"""
config = StringGrouperConfig()
with self.assertRaises(Exception) as _:
config.min_similarity = 0.1
def test_config_non_default_values(self):
"""Configurations should be immutable"""
config = StringGrouperConfig(min_similarity=0.1, max_n_matches=100, number_of_processes=1)
self.assertEqual(0.1, config.min_similarity)
self.assertEqual(100, config.max_n_matches)
self.assertEqual(1, config.number_of_processes)
class StringGrouperTest(unittest.TestCase):
def test_auto_blocking_single_DataFrame(self):
"""tests whether automatic blocking yields consistent results"""
# This function will force an OverflowError to occur when
# the input Series have a combined length above a given number:
# OverflowThreshold. This will in turn trigger automatic splitting
# of the Series/matrices into smaller blocks when n_blocks = None
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
# first do manual blocking
sg = StringGrouper(df1, min_similarity=0.1)
pd.testing.assert_series_equal(sg.master, df1)
self.assertEqual(sg.duplicates, None)
matches = fix_row_order(sg.match_strings(df1, n_blocks=(1, 1)))
self.assertEqual(sg._config.n_blocks, (1, 1))
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def do_test_with(OverflowThreshold):
nonlocal sg # allows reference to sg, as sg will be modified below
# Now let us mock sg._build_matches:
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
matches_auto = fix_row_order(sg.match_strings(df1, n_blocks=None))
pd.testing.assert_series_equal(sg.master, df1)
pd.testing.assert_frame_equal(matches, matches_auto)
self.assertEqual(sg._config.n_blocks, None)
# Note that _build_matches is called more than once if and only if
# a split occurred (that is, there was more than one pair of
# matrix-blocks multiplied)
if len(sg._left_Series) + len(sg._right_Series) > \
OverflowThreshold:
# Assert that split occurred:
self.assertGreater(sg._build_matches.call_count, 1)
else:
# Assert that split did not occur:
self.assertEqual(sg._build_matches.call_count, 1)
# now test auto blocking by forcing an OverflowError when the
# combined Series' lengths is greater than 10, 5, 3, 2
do_test_with(OverflowThreshold=100) # does not trigger auto blocking
do_test_with(OverflowThreshold=10)
do_test_with(OverflowThreshold=5)
do_test_with(OverflowThreshold=3)
do_test_with(OverflowThreshold=2)
def test_n_blocks_single_DataFrame(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
# Create a custom wrapper for this StringGrouper instance's
# _build_matches() method which will later be used to
# mock _build_matches().
# Note that we have to define the wrapper here because
# _build_matches() is a non-static function of StringGrouper
# and needs access to the specific StringGrouper instance sg
# created here.
sg = StringGrouper(df1, min_similarity=0.1)
def mock_build_matches(OverflowThreshold,
real_build_matches=sg._build_matches):
def wrapper(left_matrix,
right_matrix,
nnz_rows=None,
sort=True):
if (left_matrix.shape[0] + right_matrix.shape[0]) > \
OverflowThreshold:
raise OverflowError
return real_build_matches(left_matrix, right_matrix, nnz_rows, sort)
return wrapper
def test_overflow_error_with(OverflowThreshold, n_blocks):
nonlocal sg
sg._build_matches = Mock(side_effect=mock_build_matches(OverflowThreshold))
sg.clear_data()
max_left_block_size = (len(df1)//n_blocks[0]
+ (1 if len(df1) % n_blocks[0] > 0 else 0))
max_right_block_size = (len(df1)//n_blocks[1]
+ (1 if len(df1) % n_blocks[1] > 0 else 0))
if (max_left_block_size + max_right_block_size) > OverflowThreshold:
with self.assertRaises(Exception):
_ = sg.match_strings(df1, n_blocks=n_blocks)
else:
matches_manual = fix_row_order(sg.match_strings(df1, n_blocks=n_blocks))
pd.testing.assert_frame_equal(matches11, matches_manual)
test_overflow_error_with(OverflowThreshold=100, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(2, 1))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(1, 2))
test_overflow_error_with(OverflowThreshold=10, n_blocks=(4, 4))
def test_n_blocks_both_DataFrames(self):
"""tests whether manual blocking yields consistent results"""
sort_cols = ['right_index', 'left_index']
def fix_row_order(df):
return df.sort_values(sort_cols).reset_index(drop=True)
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.customers_df2['Customer Name']
matches11 = fix_row_order(match_strings(df1, df2, min_similarity=0.1))
matches12 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches12)
matches13 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 3), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches13)
matches14 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 4), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches14)
matches15 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 5), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches15)
matches16 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 6), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches16)
matches17 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 7), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches17)
matches18 = fix_row_order(
match_strings(df1, df2, n_blocks=(1, 8), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches18)
matches21 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 1), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches21)
matches22 = fix_row_order(
match_strings(df1, df2, n_blocks=(2, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches22)
matches32 = fix_row_order(
match_strings(df1, df2, n_blocks=(3, 2), min_similarity=0.1))
pd.testing.assert_frame_equal(matches11, matches32)
def test_n_blocks_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=2)
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(0, 2))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2.5))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, 2, 3))
with self.assertRaises(Exception):
_ = match_strings(df1, n_blocks=(1, ))
def test_tfidf_dtype_bad_option_value(self):
"""Tests that bad option values for n_blocks are caught"""
simple_example = SimpleExample()
df1 = simple_example.customers_df2['Customer Name']
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=None)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype=0)
with self.assertRaises(Exception):
_ = match_strings(df1, tfidf_matrix_dtype='whatever')
def test_compute_pairwise_similarities(self):
"""tests the high-level function compute_pairwise_similarities"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
similarities = compute_pairwise_similarities(df1, df2)
expected_result = pd.Series(
[
1.0,
0.6336195351561589,
1.0000000000000004,
1.0000000000000004,
1.0,
0.826462625999832
],
name='similarity'
)
expected_result = expected_result.astype(np.float32)
pd.testing.assert_series_equal(expected_result, similarities)
sg = StringGrouper(df1, df2)
similarities = sg.compute_pairwise_similarities(df1, df2)
pd.testing.assert_series_equal(expected_result, similarities)
def test_compute_pairwise_similarities_data_integrity(self):
"""tests that an exception is raised whenever the lengths of the two input series of the high-level function
compute_pairwise_similarities are unequal"""
simple_example = SimpleExample()
df1 = simple_example.customers_df['Customer Name']
df2 = simple_example.expected_result_centroid
with self.assertRaises(Exception):
_ = compute_pairwise_similarities(df1, df2[:-2])
@patch('string_grouper.string_grouper.StringGrouper')
def test_group_similar_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function group_similar_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = group_similar_strings(
test_series_1,
string_ids=test_series_id_1
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_most_similar(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_most_similar utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_groups.return_value = 'whatever'
test_series_1 = None
test_series_2 = None
test_series_id_1 = None
test_series_id_2 = None
df = match_most_similar(
test_series_1,
test_series_2,
master_id=test_series_id_1,
duplicates_id=test_series_id_2
)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_groups.assert_called_once()
self.assertEqual(df, 'whatever')
@patch('string_grouper.string_grouper.StringGrouper')
def test_match_strings(self, mock_StringGouper):
"""mocks StringGrouper to test if the high-level function match_strings utilizes it as expected"""
mock_StringGrouper_instance = mock_StringGouper.return_value
mock_StringGrouper_instance.fit.return_value = mock_StringGrouper_instance
mock_StringGrouper_instance.get_matches.return_value = 'whatever'
test_series_1 = None
test_series_id_1 = None
df = match_strings(test_series_1, master_id=test_series_id_1)
mock_StringGrouper_instance.fit.assert_called_once()
mock_StringGrouper_instance.get_matches.assert_called_once()
self.assertEqual(df, 'whatever')
@patch(
'string_grouper.string_grouper.StringGrouper._symmetrize_matrix',
side_effect=mock_symmetrize_matrix
)
def test_match_list_symmetry_without_symmetrize_function(self, mock_symmetrize_matrix_param):
"""mocks StringGrouper._symmetrize_matches_list so that this test fails whenever _matches_list is
**partially** symmetric which often occurs when the kwarg max_n_matches is too small"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
mock_symmetrize_matrix_param.assert_called_once()
# obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# switch the column names of lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# if the intersection is empty then _matches_list is completely non-symmetric (this is acceptable)
# if the intersection is not empty then at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertFalse(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
def test_match_list_symmetry_with_symmetrize_function(self):
"""This test ensures that _matches_list is symmetric"""
simple_example = SimpleExample()
df = simple_example.customers_df2['Customer Name']
sg = StringGrouper(df, max_n_matches=2).fit()
# Obtain the upper and lower triangular parts of the matrix of matches:
upper = sg._matches_list[sg._matches_list['master_side'] < sg._matches_list['dupe_side']]
lower = sg._matches_list[sg._matches_list['master_side'] > sg._matches_list['dupe_side']]
# Switch the column names of the lower triangular part (i.e., transpose) to convert it to upper triangular:
upper_prime = lower.rename(columns={'master_side': 'dupe_side', 'dupe_side': 'master_side'})
# Obtain the intersection between upper and upper_prime:
intersection = upper_prime.merge(upper, how='inner', on=['master_side', 'dupe_side'])
# If the intersection is empty this means _matches_list is completely non-symmetric (this is acceptable)
# If the intersection is not empty this means at least some matches are repeated.
# To make sure all (and not just some) matches are repeated, the lengths of
# upper, upper_prime and their intersection should be identical.
self.assertTrue(intersection.empty or len(upper) == len(upper_prime) == len(intersection))
@patch(
'string_grouper.string_grouper.StringGrouper._fix_diagonal',
side_effect=mock_symmetrize_matrix
)
def test_match_list_diagonal_without_the_fix(self, mock_fix_diagonal):
"""test fails whenever _matches_list's number of self-joins is not equal to the number of strings"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
mock_fix_diagonal.assert_called_once()
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertNotEqual(num_self_joins, num_strings)
def test_match_list_diagonal(self):
"""This test ensures that all self-joins are present"""
# This bug is difficult to reproduce -- I mostly encounter it while working with very large datasets;
# for small datasets setting max_n_matches=1 reproduces the bug
simple_example = SimpleExample()
df = simple_example.customers_df['Customer Name']
matches = match_strings(df, max_n_matches=1)
num_self_joins = len(matches[matches['left_index'] == matches['right_index']])
num_strings = len(df)
self.assertEqual(num_self_joins, num_strings)
def test_zero_min_similarity(self):
"""Since sparse matrices exclude zero elements, this test ensures that zero similarity matches are
returned when min_similarity <= 0. A bug related to this was first pointed out by @nbcvijanovic"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.whatever_series_1
matches = match_strings(s_master, s_dup, min_similarity=0)
pd.testing.assert_frame_equal(simple_example.expected_result_with_zeroes, matches)
def test_zero_min_similarity_small_max_n_matches(self):
"""This test ensures that a warning is issued when n_max_matches is suspected to be too small while
min_similarity <= 0 and include_zeroes is True"""
simple_example = SimpleExample()
s_master = simple_example.customers_df['Customer Name']
s_dup = simple_example.two_strings
with self.assertRaises(Exception):
_ = match_strings(s_master, s_dup, max_n_matches=1, min_similarity=0)
def test_get_non_matches_empty_case(self):
"""This test ensures that _get_non_matches() returns an empty DataFrame when all pairs of strings match"""
simple_example = SimpleExample()
s_master = simple_example.a_few_strings
s_dup = simple_example.one_string
sg = StringGrouper(s_master, s_dup, max_n_matches=len(s_master), min_similarity=0).fit()
self.assertTrue(sg._get_non_matches_list().empty)
def test_n_grams_case_unchanged(self):
"""Should return all ngrams in a string with case"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit do not ignore case
sg = StringGrouper(test_series, ignore_case=False)
expected_result = ['McD', 'cDo', 'Don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Explicit ignore case
sg = StringGrouper(test_series, ignore_case=True)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_n_grams_ignore_case_to_lower_with_defaults(self):
"""Should return all case insensitive ngrams in a string"""
test_series = pd.Series(pd.Series(['aaa']))
# Implicit default case (i.e. default behaviour)
sg = StringGrouper(test_series)
expected_result = ['mcd', 'cdo', 'don', 'ona', 'nal', 'ald', 'lds']
self.assertListEqual(expected_result, sg.n_grams('McDonalds'))
def test_build_matrix(self):
"""Should create a csr matrix only master"""
test_series = pd.Series(['foo', 'bar', 'baz'])
sg = StringGrouper(test_series)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
c = csr_matrix([[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
np.testing.assert_array_equal(c.toarray(), master.toarray())
np.testing.assert_array_equal(c.toarray(), dupe.toarray())
def test_build_matrix_master_and_duplicates(self):
"""Should create a csr matrix for master and duplicates"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
master_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 1., 0., 0.]])
dupes_expected = csr_matrix([[0., 0., 0., 1.],
[1., 0., 0., 0.],
[0., 0., 1., 0.]])
np.testing.assert_array_equal(master_expected.toarray(), master.toarray())
np.testing.assert_array_equal(dupes_expected.toarray(), dupe.toarray())
def test_build_matches(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
master, dupe = sg._get_right_tf_idf_matrix(), sg._get_left_tf_idf_matrix()
expected_matches = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
np.testing.assert_array_equal(expected_matches, sg._build_matches(master, dupe)[0].toarray())
def test_build_matches_list(self):
"""Should create the cosine similarity matrix of two series"""
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_case_insensitive_build_matches_list(self):
"""Should create the cosine similarity matrix of two case insensitive series"""
test_series_1 = pd.Series(['foo', 'BAR', 'baz'])
test_series_2 = pd.Series(['FOO', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2)
sg = sg.fit()
master = [0, 1]
dupe_side = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'master_side': master, 'dupe_side': dupe_side, 'similarity': similarity})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg._matches_list)
def test_get_matches_two_dataframes(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
sg = StringGrouper(test_series_1, test_series_2).fit()
left_side = ['foo', 'bar']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_single(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
sg = StringGrouper(test_series_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_index = [0, 3, 1, 2, 0, 3]
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side,
'similarity': similarity,
'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_1_series_1_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz', 'foo'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2', 'A3'])
sg = StringGrouper(test_series_1, master_id=test_series_id_1)
sg = sg.fit()
left_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
left_side_id = ['A0', 'A3', 'A1', 'A2', 'A0', 'A3']
left_index = [0, 3, 1, 2, 0, 3]
right_side = ['foo', 'foo', 'bar', 'baz', 'foo', 'foo']
right_side_id = ['A0', 'A0', 'A1', 'A2', 'A3', 'A3']
right_index = [0, 0, 1, 2, 3, 3]
similarity = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_2_series_2_id_series(self):
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
sg = StringGrouper(test_series_1, test_series_2, duplicates_id=test_series_id_2,
master_id=test_series_id_1).fit()
left_side = ['foo', 'bar']
left_side_id = ['A0', 'A1']
left_index = [0, 1]
right_side = ['foo', 'bar']
right_side_id = ['B0', 'B1']
right_index = [0, 1]
similarity = [1.0, 1.0]
expected_df = pd.DataFrame({'left_index': left_index, 'left_side': left_side, 'left_id': left_side_id,
'similarity': similarity,
'right_id': right_side_id, 'right_side': right_side, 'right_index': right_index})
expected_df.loc[:, 'similarity'] = expected_df.loc[:, 'similarity'].astype(sg._config.tfidf_matrix_dtype)
pd.testing.assert_frame_equal(expected_df, sg.get_matches())
def test_get_matches_raises_exception_if_unexpected_options_given(self):
# When the input id data does not correspond with its string data:
test_series_1 = pd.Series(['foo', 'bar', 'baz'])
bad_test_series_id_1 = pd.Series(['A0', 'A1'])
good_test_series_id_1 = pd.Series(['A0', 'A1', 'A2'])
test_series_2 = pd.Series(['foo', 'bar', 'bop'])
bad_test_series_id_2 = pd.Series(['B0', 'B1'])
good_test_series_id_2 = pd.Series(['B0', 'B1', 'B2'])
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=bad_test_series_id_1)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates=test_series_2, duplicates_id=bad_test_series_id_2,
master_id=good_test_series_id_1)
# When the input data is ok but the option combinations are invalid:
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, test_series_2, master_id=good_test_series_id_1)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, test_series_2, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=good_test_series_id_1, duplicates_id=good_test_series_id_2)
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, master_id=good_test_series_id_1, ignore_index=True, replace_na=True)
# Here we force an exception by making the number of index-levels of duplicates different from master:
# and setting replace_na=True
test_series_2.index = pd.MultiIndex.from_tuples(list(zip(list('ABC'), [0, 1, 2])))
with self.assertRaises(Exception):
_ = StringGrouper(test_series_1, duplicates=test_series_2, replace_na=True)
def test_get_groups_single_df_group_rep_default(self):
"""Should return a pd.Series object with the same length as the original df. The series object will contain
a list of the grouped strings"""
simple_example = SimpleExample()
customers_df = simple_example.customers_df
pd.testing.assert_series_equal(
simple_example.expected_result_centroid,
group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=True
)
)
sg = StringGrouper(customers_df['Customer Name'])
pd.testing.assert_series_equal(
simple_example.expected_result_centroid,
sg.group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=True
)
)
def test_get_groups_single_valued_series(self):
"""This test ensures that get_groups() returns a single-valued DataFrame or Series object
since the input-series is also single-valued. This test was created in response to a bug discovered
by <NAME>"""
pd.testing.assert_frame_equal(
pd.DataFrame([(0, "hello")], columns=['group_rep_index', 'group_rep']),
group_similar_strings(
pd.Series(["hello"]),
min_similarity=0.6
)
)
pd.testing.assert_series_equal(
pd.Series(["hello"], name='group_rep'),
group_similar_strings(
pd.Series(["hello"]),
min_similarity=0.6,
ignore_index=True
)
)
pd.testing.assert_frame_equal(
pd.DataFrame([(0, "hello")], columns=['most_similar_index', 'most_similar_master']),
match_most_similar(
pd.Series(["hello"]),
pd.Series(["hello"]),
min_similarity=0.6
)
)
pd.testing.assert_frame_equal(
pd.DataFrame([(0, "hello")], columns=['most_similar_index', 'most_similar_master']),
match_most_similar(
pd.Series(["hello"]),
pd.Series(["hello"]),
min_similarity=0.6,
max_n_matches=20
)
)
pd.testing.assert_series_equal(
pd.Series(["hello"], name='most_similar_master'),
match_most_similar(
pd.Series(["hello"]),
pd.Series(["hello"]),
min_similarity=0.6,
ignore_index=True
)
)
def test_get_groups_single_df_keep_index(self):
"""Should return a pd.Series object with the same length as the original df. The series object will contain
a list of the grouped strings with their indexes displayed in columns"""
simple_example = SimpleExample()
customers_df = simple_example.customers_df
pd.testing.assert_frame_equal(
simple_example.expected_result_centroid_with_index_col,
group_similar_strings(
customers_df['Customer Name'],
min_similarity=0.6,
ignore_index=False
)
)
def test_get_groups_single_df_group_rep_centroid(self):
"""Should return a pd.Series object with the same length as the original df. The series object will contain
a list of the grouped strings"""
simple_example = SimpleExample()
customers_df = simple_example.customers_df
pd.testing.assert_series_equal(
simple_example.expected_result_first,
group_similar_strings(
customers_df['Customer Name'],
group_rep='first',
min_similarity=0.6,
ignore_index=True
)
)
def test_get_groups_single_df_group_rep_bad_option_value(self):
"""Should raise an exception when group_rep value given is neither 'centroid' nor 'first'"""
simple_example = SimpleExample()
customers_df = simple_example.customers_df
with self.assertRaises(Exception):
_ = group_similar_strings(
customers_df['Customer Name'],
group_rep='nonsense',
min_similarity=0.6
)
def test_get_groups_single_df(self):
"""Should return a pd.Series object with the same length as the original df. The series object will contain
a list of the grouped strings"""
test_series_1 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
sg = StringGrouper(test_series_1, ignore_index=True)
sg = sg.fit()
result = sg.get_groups()
expected_result = pd.Series(['foooo', 'bar', 'baz', 'foooo'], name='group_rep')
pd.testing.assert_series_equal(expected_result, result)
def test_get_groups_1_string_series_1_id_series(self):
"""Should return a pd.DataFrame object with the same length as the original df. The series object will contain
a list of the grouped strings"""
test_series_1 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
test_series_id_1 = pd.Series(['A0', 'A1', 'A2', 'A3'])
sg = StringGrouper(test_series_1, master_id=test_series_id_1, ignore_index=True)
sg = sg.fit()
result = sg.get_groups()
expected_result = pd.DataFrame(list(zip(['A0', 'A1', 'A2', 'A0'], ['foooo', 'bar', 'baz', 'foooo'])),
columns=['group_rep_id', 'group_rep'])
pd.testing.assert_frame_equal(expected_result, result)
def test_get_groups_two_df(self):
"""Should return a pd.Series object with the length of the dupes. The series will contain the master string
that matches the dupe with the highest similarity"""
test_series_1 = pd.Series(['foooo', 'bar', 'baz'])
test_series_2 = pd.Series(['foooo', 'bar', 'baz', 'foooob'])
sg = StringGrouper(test_series_1, test_series_2, ignore_index=True)
sg = sg.fit()
result = sg.get_groups()
expected_result = pd.Series(['foooo', 'bar', 'baz', 'foooo'], name='most_similar_master')
pd.testing.assert_series_equal(expected_result, result)
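# A small, self-contained usage sketch of the high-level string_grouper API
# exercised by the tests above (match_strings and group_similar_strings).
# It is illustrative only and not part of the original test suite; the
# company names are made up.
if __name__ == '__main__':
    names = pd.Series(['Mega Enterprises Corporation',
                       'Mega Enterprises Corp.',
                       'Hyper Startup Inc.',
                       'Hyper-Startup Inc.'])
    # All pairs whose cosine similarity over character n-grams clears the threshold
    print(match_strings(names, min_similarity=0.6))
    # One representative ("group_rep") per group of similar strings
    print(group_similar_strings(names, min_similarity=0.6, ignore_index=True))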
from typing import *
import numpy as np
import argparse
from toolz.itertoolz import get
import zarr
import re
import sys
import logging
import pickle
import pandas as pd
from sympy import Point, Line
from skimage import feature, measure, morphology, img_as_float
from skimage.filters import rank_order
from scipy import ndimage as nd
from pathlib import Path
from pysmFISH.utils import convert_from_uint16_to_float64
from pysmFISH.data_models import Output_models
from pysmFISH.logger_utils import selected_logger
class osmFISH_dots_thr_selection():
"""
Class used to automatically define the threshold used to call the
signal peaks. This class calculates the threshold without masking large objects
or contamination.
This is the original class used in the osmFISH paper.
"""
def __init__(self, img:np.ndarray, parameters_dict:Dict, min_int:float=False, max_int:float=False,min_peaks:int=False):
"""Initialize the class
Args:
img (np.ndarray): Image to process
parameters_dict (Dict): Parameters used to define the peaks.
min_int (float, optional): Minimum intensity value to use for the binning of
the signal intensities. Defaults to False.
max_int (float, optional): Maximum intensity value to use for the binning of
the signal intensities. Defaults to False.
min_peaks (int, optional): Minimum number of peaks required for the
calculation of the counting threshold. Defaults to False.
"""
self.img = img
self.parameters_dict = parameters_dict
self.min_int = min_int
self.max_int = max_int
self.min_peaks = min_peaks
if self.min_peaks == False:
self.min_peaks = 3
self.min_distance = self.parameters_dict['min_distance']
self.fill_value = np.nan
# List with the total peaks calculated for each threshold
self.total_peaks = []
self.thr_used = []
def counting_graph(self):
"""Function used for the construction of the number of peaks(Y) / thresholds(X)
graph used to define the threshold.
"""
binning = 100
# Define the range of thr to be tested
if self.img.max() == 0:
self.thr_array = []
else:
if self.min_int and self.max_int:
self.thr_array = np.linspace(self.min_int,self.max_int,num=binning)
elif self.min_int:
self.thr_array = np.linspace(self.min_int,self.img.max(),num=binning)
elif self.max_int:
self.thr_array = np.linspace(np.min(self.img[np.nonzero(self.img)]),self.max_int,num=binning)
else:
self.thr_array = np.linspace(np.min(self.img[np.nonzero(self.img)]),self.img.max(),num=binning)
# Calculate the number of peaks for each threshold. In this calculation
# the size of the objects is not considered
self.peak_counter_min = 0
self.peak_counter_max = 0
for vl, thr in enumerate(self.thr_array):
# The border is excluded from the counting
self.peaks = feature.peak_local_max(self.img,min_distance=self.min_distance,\
threshold_abs=thr,exclude_border=False, indices=True,\
num_peaks=np.inf, footprint=None,labels=None)
self.number_peaks = len(self.peaks)
# Stop the counting when the number of peaks detected falls below 3
if self.number_peaks<=self.min_peaks:
self.stop_thr = thr # Move in the upper loop so you will stop at the previous thr
break
else:
self.total_peaks.append(len(self.peaks))
self.thr_used.append(thr)
def thr_identification(self):
"""Function that use the number of peaks / thresholds graph to define the threshold
to used for the counting.
- calculate the gradient of the number of peaks / threshold function
- remove the initial minimum point
- calculate the segment that join the extremities of the gradient. This version
of the code uses sympy.
- Calculate the thr corresponding to the point of max distance from the segment
"""
# Consider the case of no detected peaks or when there is only one Thr
# that creates peaks (the list total_peaks has only one element)
# if np.array(total_peaks).sum()>0 or len(total_peaks)>1:
if len(self.total_peaks)>1:
# Trim the threshold array in order to match the stopping point
# use [0][0] to get the first match and then take it out of the list
# thr_array = thr_array[:np.where(thr_array==stop_thr)[0][0]]
self.thr_array = np.array(self.thr_used)
# Calculate the gradient of the number of peaks distribution
grad = np.gradient(self.total_peaks)
# Restructure the data in order to avoid considering the min_peak in the
# calculations
# Coord of the gradient min_peak
grad_min_peak_coord = np.argmin(grad)
# Trim the data to remove the peak.
self.trimmed_thr_array = self.thr_array[grad_min_peak_coord:]
self.trimmed_grad = grad[grad_min_peak_coord:]
if self.trimmed_thr_array.shape>(1,):
# Trim the coords array in order to maintain the same length of the
# tr and pk
self.trimmed_total_peaks = self.total_peaks[grad_min_peak_coord:]
# To determine the threshold we select the Thr with the biggest
# distance to the segment that joins the end points of the calculated
# gradient
# Distances list
distances = []
# Calculate the coords of the end points of the gradient
p1 = Point(self.trimmed_thr_array[0],self.trimmed_grad[0])
p2 = Point(self.trimmed_thr_array[-1],self.trimmed_grad[-1])
# Create a line that join the points
s = Line(p1,p2)
allpoints = np.arange(0,len(self.trimmed_thr_array))
# Calculate the distance between all points and the line
for p in allpoints:
dst = s.distance(Point(self.trimmed_thr_array[p],self.trimmed_grad[p]))
distances.append(dst.evalf())
# Remove the end points from the lists
self.trimmed_thr_array = self.trimmed_thr_array[1:-1]
self.trimmed_grad = self.trimmed_grad[1:-1]
self.trimmed_total_peaks = self.trimmed_total_peaks[1:-1]
self.trimmed_distances = distances[1:-1]
# Determine the coords of the selected Thr
# Converted trimmed_distances to array because it crashed
# on Sanger.
if self.trimmed_distances: # Most efficient way will be to consider the length of Thr list
thr_idx = np.argmax(np.array(self.trimmed_distances))
self.selected_thr = self.trimmed_thr_array[thr_idx]
# The selected threshold usually causes oversampling of the number of dots
# I added a stringency parameter (int n) used to select Thr+n
# for the counting. It applies the stringency only if the trimmed_thr_array
# is long enough. Also consider the case in which the stringency is negative
else:
self.selected_thr = self.fill_value
self.trimmed_thr_array = self.fill_value
else:
self.selected_thr = self.fill_value
self.trimmed_thr_array = self.fill_value
else:
self.selected_thr = self.fill_value
self.trimmed_thr_array = self.fill_value
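# The selection rule implemented above is an "elbow" criterion: take the
# gradient of the peak-count curve, join its end points with a straight
# segment, and pick the threshold whose gradient point lies farthest from that
# segment. A numpy-only sketch of the same idea (no sympy) is given below; the
# helper name is hypothetical and, unlike the class, it does not trim the
# curve at the gradient minimum or drop the end points.
def _elbow_threshold_sketch(thr_array: np.ndarray, total_peaks: np.ndarray) -> float:
    """Return the threshold at the elbow of the peak-count curve."""
    grad = np.gradient(total_peaks)
    p1 = np.array([thr_array[0], grad[0]])
    p2 = np.array([thr_array[-1], grad[-1]])
    points = np.column_stack([thr_array, grad])
    # Perpendicular distance of every (thr, grad) point from the p1-p2 segment
    d = p2 - p1
    b = p1 - points
    cross = d[0] * b[:, 1] - d[1] * b[:, 0]
    distances = np.abs(cross) / np.linalg.norm(d)
    return float(thr_array[np.argmax(distances)])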
class osmFISH_dots_mapping():
"""Function used to count the peaks after identification of the threshold
and masking of large objects.
This is the original class used in the osmFISH paper.
"""
def __init__(self,img: np.ndarray,thr: float,parameters_dict: dict):
"""Class initialization
Args:
img (np.ndarray): Image to process
thr (float): Precalculate threshold for masking the image
parameters_dict (dict): Parameters used to define the peaks.
"""
# Calculate the selected peaks after removal of the big and small objects
self.img = img
self.thr = thr
# make an error if selected Thr <0
self.parameters_dict = parameters_dict
self.min_distance = self.parameters_dict['min_distance']
self.min_obj_size = self.parameters_dict['min_obj_size']
self.max_obj_size = self.parameters_dict['max_obj_size']
self.num_peaks_per_label = self.parameters_dict['num_peaks_per_label']
self.fill_value = np.nan
# Threshold the image using the selected threshold
img_mask = self.img>self.thr
labels = nd.label(img_mask)[0]
properties = measure.regionprops(labels)
for ob in properties:
if ob.area<self.min_obj_size or ob.area>self.max_obj_size:
img_mask[ob.coords[:,0],ob.coords[:,1]]=0
labels = nd.label(img_mask)[0]
# Collect the properties of the labels after size selection
properties = measure.regionprops(labels,intensity_image=self.img)
self.selected_peaks = feature.peak_local_max(self.img, min_distance=self.min_distance,
threshold_abs=self.thr, exclude_border=True,
footprint=None, labels=labels,num_peaks_per_label=self.num_peaks_per_label)
# # calling peak_local_max without Labels argument
# selected_peaks_mask = feature.peak_local_max(self.img, min_distance=self.min_distance,
# threshold_abs=self.thr, exclude_border=True,
# footprint=None,num_peaks=np.inf,indices=False).astype(int)
# # instead, make sure the selected peaks does not meet zeros at labels (background)
# labels_mask = (labels > 0).astype(int)
# selected_peaks_mask = selected_peaks_mask * labels_mask
# self.selected_peaks = np.vstack(np.where(selected_peaks_mask)).T
if self.selected_peaks.size:
self.intensity_array = self.img[self.selected_peaks[:,0],self.selected_peaks[:,1]]
else:
self.intensity_array = np.nan
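# Illustrative sketch (not part of the original module) of how the two classes
# above are meant to be chained: first estimate the counting threshold on the
# filtered image, then count the peaks with that threshold while masking
# objects outside the allowed size range. The parameter values are made-up
# examples, not the values used in the pipeline.
def _count_dots_sketch(img: np.ndarray) -> np.ndarray:
    params = {'min_distance': 5, 'min_obj_size': 2,
              'max_obj_size': 200, 'num_peaks_per_label': 1}
    thr_sel = osmFISH_dots_thr_selection(img, params)
    thr_sel.counting_graph()
    thr_sel.thr_identification()
    if np.isnan(thr_sel.selected_thr):
        return np.empty((0, 2))              # no usable threshold was found
    mapping = osmFISH_dots_mapping(img, thr_sel.selected_thr, params)
    return mapping.selected_peaks            # (row, col) coordinates of the dots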
def peak_thrs_local_max_fast(image: np.ndarray, min_distance: int=1,
threshold_abs: float=None,threshold_rel:float=None,
exclude_border: int=True, indices: bool=True,
num_peaks: int=np.inf, footprint: np.ndarray=None,
labels: np.ndarray=None)->np.ndarray:
"""Function after modification:
returns the coordinates for a range of thresholds
Peaks are the local maxima in a region of `2 * min_distance + 1`
(i.e. peaks are separated by at least `min_distance`).
If peaks are flat (i.e. multiple adjacent pixels have identical
intensities), the coordinates of all such pixels are returned.
If both `threshold_abs` and `threshold_rel` are provided, the maximum
of the two is chosen as the minimum intensity threshold of peaks.
Notes
-----
The peak local maximum function returns the coordinates of local peaks
(maxima) in an image. A maximum filter is used for finding local maxima.
This operation dilates the original image. After comparison of the dilated
and original image, this function returns the coordinates or a mask of the
peaks where the dilated image equals the original image.
Examples
--------
>>> img1 = np.zeros((7, 7))
>>> img1[3, 4] = 1
>>> img1[3, 2] = 1.5
>>> img1
array([[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 1.5, 0. , 1. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0. , 0. ]])
>>> peak_local_max(img1, min_distance=1)
array([[3, 2],
[3, 4]])
>>> peak_local_max(img1, min_distance=2)
array([[3, 2]])
>>> img2 = np.zeros((20, 20, 20))
>>> img2[10, 10, 10] = 1
>>> peak_local_max(img2, exclude_border=0)
array([[10, 10, 10]])
Args:
image (np.ndarray): Input image.
min_distance (int, optional): Minimum number of pixels separating peaks in a region of `2 *
min_distance + 1` (i.e. peaks are separated by at least
`min_distance`). Defaults to 1.
threshold_abs (float, optional): Minimum intensity of peaks. By default, the absolute threshold is
the minimum intensity of the image. Defaults to None.
threshold_rel (float, optional): Minimum intensity of peaks, calculated as `max(image) * threshold_rel`.
Defaults to None.
exclude_border (int, optional): If nonzero, `exclude_border` excludes peaks from
within `exclude_border`-pixels of the border of the image.. Defaults to True.
indices (bool, optional): If True, the output will be an array representing peak
coordinates. If False, the output will be a boolean array shaped as
`image.shape` with peaks present at True elements.. Defaults to True.
num_peaks (int, optional): Maximum number of peaks. When the number of peaks exceeds `num_peaks`,
return `num_peaks` peaks based on highest peak intensity.. Defaults to np.inf.
footprint (np.ndarray, optional): If provided, `footprint == 1` represents the local region within which
to search for peaks at every point in `image`. Overrides
`min_distance` (also for `exclude_border`).. Defaults to None.
labels (np.ndarray, optional): If provided, each unique region `labels == value` represents a unique
region to search for peaks. Zero is reserved for background.. Defaults to None.
Returns:
np.ndarray: If `indices = True` : (row, column, ...) coordinates of peaks.
If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
"""
if type(exclude_border) == bool:
exclude_border = min_distance if exclude_border else 0
out = np.zeros_like(image, dtype=bool)
# In the case of labels, recursively build and return an output
# operating on each label separately
if labels is not None:
label_values = np.unique(labels)
# Reorder label values to have consecutive integers (no gaps)
if np.any(np.diff(label_values) != 1):
mask = labels >= 1
labels[mask] = 1 + rank_order(labels[mask])[0].astype(labels.dtype)
labels = labels.astype(np.int32)
# New values for new ordering
label_values = np.unique(labels)
for label in label_values[label_values != 0]:
maskim = (labels == label)
out += feature.peak_local_max(image * maskim, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=False, num_peaks=np.inf,
footprint=footprint, labels=None)
if indices is True:
return np.transpose(out.nonzero())
else:
return out.astype(bool)
if np.all(image == image.flat[0]):
if indices is True:
return np.empty((0, 2), int)
else:
return out
# Non maximum filter
if footprint is not None:
image_max = nd.maximum_filter(image, footprint=footprint,
mode='constant')
else:
size = 2 * min_distance + 1
image_max = nd.maximum_filter(image, size=size, mode='constant')
mask = image == image_max
if exclude_border:
# zero out the image borders
for i in range(mask.ndim):
mask = mask.swapaxes(0, i)
remove = (footprint.shape[i] if footprint is not None
else 2 * exclude_border)
mask[:remove // 2] = mask[-remove // 2:] = False
mask = mask.swapaxes(0, i)
# find top peak candidates above a threshold
thresholds = []
if threshold_abs is None:
threshold_abs = image.min()
thresholds.append(threshold_abs)
if threshold_rel is not None:
thresholds.append(threshold_rel * image.max())
if thresholds:
mask_original = mask # save the local maxima's of the image
thrs_coords = {} # dictionary holds the coordinates correspond for each threshold
for threshold in thresholds[0]:
mask = mask_original
mask &= image > threshold
# get coordinates of peaks
coordinates = np.transpose(mask.nonzero())
if coordinates.shape[0] > num_peaks:
intensities = image.flat[np.ravel_multi_index(coordinates.transpose(),
image.shape)]
idx_maxsort = np.argsort(intensities)[::-1]
coordinates = coordinates[idx_maxsort][:num_peaks]
if indices is True:
thrs_coords[threshold] = coordinates
else:
nd_indices = tuple(coordinates.T)
out[nd_indices] = True
return out
if thresholds and thrs_coords:
return thrs_coords
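# Quick illustrative check (not part of the original module): evaluate the
# peak coordinates for a whole grid of thresholds in a single pass and print
# how the number of detected peaks decays as the threshold grows. The toy
# image and the ascending threshold grid are made up for the example; an
# ascending grid matches how ThrArray is built by the caller below.
def _threshold_scan_sketch() -> None:
    rng = np.random.default_rng(0)
    toy_img = rng.random((128, 128))
    thr_grid = np.linspace(0.5, 0.99, num=20)
    thrs_coords = peak_thrs_local_max_fast(toy_img, min_distance=3,
                                           threshold_abs=thr_grid,
                                           exclude_border=True, indices=True)
    for thr, coords in sorted(thrs_coords.items()):
        print(f"thr={thr:.2f} -> {len(coords)} peaks")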
def osmFISH_peak_based_detection_fast(ImgStack: np.ndarray,
fov_subdataset: pd.Series,
parameters_dict: dict,
dimensions: int=2,
stringency:int =0,
min_int:float=False,
max_int:float=False,
min_peaks:int=False)->pd.DataFrame:
"""This function is used to calculate the threshold to use for the dots
counting in a 3D image. It is based on the function used in the osmFISH
paper but doesn't require sympy. It integrates the peak_thrs_local_max_fast
and the calculation of the peaks on the masked image.
Args:
ImgStack (np.ndarray): preprocessed image used to count the dots
fov_subdataset (pd.Series): Series with the metadata info relative to the image to process
parameters_dict (dict): Parameters used to define the peaks.
dimensions (int, optional): Image dimension (2 for 2D or 3 for 3D). Defaults to 2.
stringency (int, optional): Select a thr before or after the one calculated
automatically. Defaults to 0.
min_int (float, optional): Minimum intensity value to use for the binning of
the signal intensities. Defaults to False.
max_int (float, optional): Maximum intensity value to use for the binning of
the signal intensities. Defaults to False.
min_peaks (int, optional): Minimum number of peaks required for the
calculation of the counting threshold. Defaults to False.
Returns:
pd.DataFrame: counts data
"""
logger = selected_logger()
if min_peaks == False:
min_peaks = 3
fill_value = np.nan
# List with the total peaks calculated for each threshold
thr_used = []
binning = 100
# Define the range of thr to be tested
if min_int and max_int:
ThrArray = np.linspace(min_int,max_int,num=binning)
elif min_int:
ThrArray = np.linspace(min_int,ImgStack.max(),num=binning)
elif max_int:
ThrArray = np.linspace(np.min(ImgStack[np.nonzero(ImgStack)]),max_int,num=binning)
else:
ThrArray = np.linspace(np.min(ImgStack[np.nonzero(ImgStack)]),ImgStack.max(),num=binning)
fov = fov_subdataset.fov_num
round_num = fov_subdataset.round_num
channel = fov_subdataset.channel
target_name = fov_subdataset.target_name
fill_value = np.nan
data_models = Output_models()
counts_dict = data_models.dots_counts_dict
# Initialise an empty version of the counts dict
counts_dict['r_px_original'] = np.array([fill_value])
counts_dict['c_px_original'] = np.array([fill_value])
counts_dict['dot_id'] = np.array([fill_value])
counts_dict['dot_intensity'] = np.array([fill_value])
counts_dict['selected_thr'] = np.array([fill_value])
min_distance = parameters_dict['CountingFishMinObjDistance']
min_obj_size = parameters_dict['CountingFishMinObjSize']
max_obj_size = parameters_dict['CountingFishMaxObjSize']
#num_peaks_per_label = self.parameters_dict['num_peaks_per_label']
fov_subdataset_df = pd.DataFrame(fov_subdataset).T
# List of ndarrays with the coords of the peaks calculated for each threshold
PeaksCoords = []
Selected_Peaks2_mask = None
# Determine if working in 2D or 3D
if dimensions == 2:
if len(ImgStack.shape) > 2:
ImgStack = np.amax(ImgStack, axis=0)
# Define the Thr array
# ThrArray = np.linspace(ImgStack.min(), ImgStack.max(), num=binning)
# Calculate the number of peaks for each threshold
# Exclude border because of artefacts of image processing
thrs_peaks = peak_thrs_local_max_fast(ImgStack, min_distance=min_distance,
threshold_abs=ThrArray, exclude_border=True, indices=True,
num_peaks=np.inf, footprint=None, labels=None)
lists = sorted(thrs_peaks.items()) # sorted by key, return a list of tuples. tuple[0]: threshold, tuple[1]: coords
x, PeaksCoords = zip(*lists) # unpack a list of pairs into two tuples
TotalPeaks = []
for j in range(len(PeaksCoords)):
TotalPeaks += (len(PeaksCoords[j]),) # get number of peaks
# print("Thresholds distribution %.3f seconds" % (timings['thrs_dist']))
# Consider the case of no detected peaks or when there is only one Thr
# that creates peaks (the list TotalPeaks has only one element)
# if np.array(TotalPeaks).sum()>0 or len(TotalPeaks)>1:
if len(TotalPeaks) > 3:
# Trim the threshold array in order to match the stopping point
# use [0][0] to get the first match and then take it out of the list
# ThrArray = ThrArray[:np.where(ThrArray == StopThr)[0][0]]
# Trim and convert to types as Simone's
TotalPeaks = np.array(TotalPeaks)
TotalPeaks = list(TotalPeaks[TotalPeaks > min_peaks])
ThrArray = ThrArray[:len(TotalPeaks)]
PeaksCoords = np.array(PeaksCoords)
PeaksCoords = PeaksCoords[:len(TotalPeaks)]
PeaksCoords = list(PeaksCoords)
if len(TotalPeaks) > 3:
# Calculate the gradient of the number of peaks distribution
# grad = np.gradient(TotalPeaks)
grad = np.gradient(TotalPeaks,edge_order=1)
# Restructure the data in order to avoid considering the min_peak in the
# calculations
# Coord of the gradient min_peak
grad_min_peak_coord = np.argmin(grad)
# Trim the data to remove the peak.
trimmed_thr_array = ThrArray[grad_min_peak_coord:]
trimmed_grad = grad[grad_min_peak_coord:]
if trimmed_thr_array.shape > (1,):
# Trim the coords array in order to maintain the same length of the
# tr and pk
Trimmed_PeaksCoords = PeaksCoords[grad_min_peak_coord:]
trimmed_total_peaks = TotalPeaks[grad_min_peak_coord:]
# To determine the threshold we select the Thr with the biggest
# distance to the segment that joins the end points of the calculated
# gradient
# Calculate the coords of the end points of the gradient
p1 = np.array([trimmed_thr_array[0],trimmed_grad[0]])
p2 = np.array([trimmed_thr_array[-1],trimmed_grad[-1]])
# Create a line that joins the points
allpoints = np.arange(0,len(trimmed_thr_array))
allpoints_coords = np.array([trimmed_thr_array[allpoints],trimmed_grad[allpoints]]).T
distances = []
for point in allpoints_coords:
distances.append(np.linalg.norm(np.cross(p2-p1, p1-point))/np.linalg.norm(p2-p1))
# Remove the end points from the lists
trimmed_thr_array = trimmed_thr_array[1:-1]
trimmed_grad = trimmed_grad[1:-1]
trimmed_total_peaks = trimmed_total_peaks[1:-1]
trimmed_distances = distances[1:-1]
# Determine the coords of the selected Thr
# Converted Trimmed_distances to array because it crashed
# on Sanger.
if trimmed_distances: # Most efficient way will be to consider the length of Thr list
Thr_idx = np.argmax(np.array(trimmed_distances))
Calculated_Thr = trimmed_thr_array[Thr_idx]
                    # The selected threshold usually causes oversampling of the number of dots.
                    # I added a stringency parameter (int n) used to select Thr+n
                    # for the counting. The stringency is applied only if the trimmed
                    # Thr array is long enough.
if Thr_idx + stringency < len(trimmed_thr_array):
Selected_Thr = trimmed_thr_array[Thr_idx + stringency]
Selected_Peaks = Trimmed_PeaksCoords[Thr_idx + stringency]
else:
Selected_Thr = trimmed_thr_array[Thr_idx]
Selected_Peaks = Trimmed_PeaksCoords[Thr_idx]
# Calculate the selected peaks after removal of the big and small objects
# Threshold the image using the selected threshold
# if Selected_Thr > 0:
# ImgMask = ImgStack > Selected_Thr
ImgMask = ImgStack > Selected_Thr
Labels = nd.label(ImgMask)[0]
Properties = measure.regionprops(Labels)
for ob in Properties:
if ob.area < min_obj_size or ob.area > max_obj_size:
ImgMask[ob.coords[:, 0], ob.coords[:, 1]] = 0
Labels = nd.label(ImgMask)[0]
# # calling peak_local_max without Labels argument
# Selected_Peaks2_mask = feature.peak_local_max(ImgStack, min_distance=min_distance,
# threshold_abs=Selected_Thr, exclude_border=True, indices=False,
# footprint=None,num_peaks=np.inf).astype(int)
# # instead, make sure the selected peaks does not meet zeros at labels (background)
# Labels_mask = (Labels > 0).astype(int)
# Selected_Peaks2_mask = Selected_Peaks2_mask * Labels_mask
# Selected_Peaks2 = np.vstack(np.where(Selected_Peaks2_mask)).T
Selected_Peaks2 = feature.peak_local_max(ImgStack, min_distance=min_distance,
threshold_abs=Selected_Thr, exclude_border=True, indices=True,
footprint=None,labels=Labels,num_peaks=np.inf).astype(int)
if Selected_Peaks2.size:
# Intensity counting of the max peaks
# Selected_peaks_coords = np.where(Selected_Peaks2)
# Selected_peaks_int = ImgStack[Selected_peaks_coords[0], Selected_peaks_coords[1]]
Selected_peaks_int = ImgStack[Selected_Peaks2[:, 0], Selected_Peaks2[:, 1]]
# Peaks have been identified
total_dots = Selected_Peaks2.shape[0]
dot_id_array = np.array([str(fov)+'_'+str(round_num)+'_'+ channel +'_'+str(nid) for nid in range(total_dots)])
thr_array = np.repeat(Selected_Thr,total_dots)
channel_array = np.repeat(channel,total_dots)
counts_dict['r_px_original'] = Selected_Peaks2[:,0]
counts_dict['c_px_original'] = Selected_Peaks2[:,1]
counts_dict['dot_id'] = dot_id_array
counts_dict['dot_intensity'] = Selected_peaks_int
counts_dict['selected_thr'] = thr_array
else:
logger.info(f' fov {fov} does not have counts (mapping)')
else:
logger.info(f' fov {fov} Trimmed distance equal to zero')
else:
                logger.info(f' fov {fov} calculated Thr array too small for selection of Thr')
else:
logger.info(f' fov {fov} does not have counts for calculating Thr')
else:
logger.info(f' fov {fov} does not have counts for calculating Thr')
counts_df = pd.DataFrame(counts_dict)
fov_subdataset_df = pd.DataFrame(fov_subdataset).T
fov_subdataset_df = pd.concat([fov_subdataset_df]*counts_df.shape[0],axis=0).sort_index().reset_index(drop=True)
counts_df = pd.concat([counts_df,fov_subdataset_df],axis=1)
return counts_df
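# Hedged usage sketch (not part of the original pipeline): the three-argument call
# below mirrors how beads_peak_based_detection invokes the fast counter; `img_stack`,
# `fov_series` and `counting_params` are placeholder names for an image stack, a
# pandas Series of FOV metadata and a dict with the CountingFish* parameters.
def _example_run_fast_counting(img_stack, fov_series, counting_params):
    """Illustrative only: run the fast peak-based counter on a single FOV."""
    counts = osmFISH_peak_based_detection_fast(img_stack, fov_series, counting_params)
    return counts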
def beads_peak_based_detection(img: np.ndarray,
fov_subdataset: pd.Series,
processing_parameters: Dict)->pd.DataFrame:
"""Counts the peaks in the reference images with small and large beads.
It first identify the large beads and then mask them and identify the
small ones.
Args:
img (np.ndarray): Reference image with large and small beads
fov_subdataset (pd.Series): Series with the metadata info relative to the image to process
processing_parameters (Dict): Parameters used to define the peaks.
Returns:
pd.DataFrame: Beads counts
"""
stitching_type = fov_subdataset.stitching_type
LargeObjRemovalPercentile = processing_parameters['LargeObjRemovalPercentile']
LargeObjRemovalMinObjSize = processing_parameters['LargeObjRemovalMinObjSize']
LargeObjRemovalSelem = processing_parameters['LargeObjRemovalSelem']
if stitching_type == 'both-beads':
large_beads_counts_df = osmFISH_peak_based_detection_fast(img,fov_subdataset,processing_parameters)
large_beads_counts_df['mapped_beads_type'] = 'large'
mask = np.zeros_like(img)
idx= img > np.percentile(img,LargeObjRemovalPercentile)
mask[idx] = 1
labels = nd.label(mask)
properties = measure.regionprops(labels[0])
for ob in properties:
if ob.area < LargeObjRemovalMinObjSize:
mask[ob.coords[:,0],ob.coords[:,1]]=0
mask = morphology.binary_dilation(mask, selem=morphology.disk(LargeObjRemovalSelem))
mask = np.logical_not(mask)
masked_img = img*mask
processing_parameters_small = {
'CountingFishMinObjDistance': 5,
'CountingFishMaxObjSize': 20,
'CountingFishMinObjSize': 2,
'CountingFishNumPeaksPerLabel': 1}
small_beads_counts_df = osmFISH_peak_based_detection_fast(masked_img,fov_subdataset,processing_parameters_small)
small_beads_counts_df['mapped_beads_type'] = 'small'
counts_df = pd.concat([large_beads_counts_df,small_beads_counts_df], axis=0, copy=False)
elif stitching_type == 'small-beads':
counts_df = osmFISH_peak_based_detection_fast(img,fov_subdataset,processing_parameters)
counts_df['mapped_beads_type'] = 'small'
elif stitching_type == 'large-beads':
counts_df = osmFISH_peak_based_detection_fast(img,fov_subdataset,processing_parameters)
counts_df['mapped_beads_type'] = 'large'
return counts_df
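# Hedged sketch: the parameter dict beads_peak_based_detection reads from. The keys
# are the ones accessed above (and forwarded to the fast counter for the large
# beads); the values are illustrative placeholders, not the pipeline defaults.
def _example_beads_processing_parameters():
    return {
        'LargeObjRemovalPercentile': 99,
        'LargeObjRemovalMinObjSize': 50,
        'LargeObjRemovalSelem': 3,
        # forwarded to osmFISH_peak_based_detection_fast for the large beads
        'CountingFishMinObjDistance': 5,
        'CountingFishMinObjSize': 2,
        'CountingFishMaxObjSize': 200,
        'CountingFishNumPeaksPerLabel': 1,
    }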
def osmFISH_peak_based_detection(ImgStack: np.ndarray,
fov_subdataset: pd.Series,
parameters_dict: dict):
"""
This funtion apply the same peak based detection strategy used for
dots calling in the osmFISH paper
Args:
-----------
img_meta: tuple
tuple containing (image np.ndarray and metadata dict)
min_distance: np.float64
minimum distance between two peaks
min_obj_size: np.uint16
minimum object size of the objects that will be processed for peak detection
objects below this value are discharged
max_obj_size: np.uint16
maximum object size of the objects that will be processed for peak detection
objects above this value are discharged
num_peaks_per_label: np.uint16
Max number of peaks detected in each segmented object. Use None for max detection
"""
logger = selected_logger()
fov = fov_subdataset.fov_num
round_num = fov_subdataset.round_num
channel = fov_subdataset.channel
target_name = fov_subdataset.target_name
fill_value = np.nan
data_models = Output_models()
counts_dict = data_models.dots_counts_dict
# Initialise an empty version of the counts dict
counts_dict['r_px_original'] = np.array([fill_value])
counts_dict['c_px_original'] = np.array([fill_value])
counts_dict['dot_id'] = np.array([fill_value])
counts_dict['dot_intensity'] = np.array([fill_value])
counts_dict['selected_thr'] = np.array([fill_value])
fov_subdataset_df = pd.DataFrame(fov_subdataset).T
counting_parameters_dict = {
'min_distance': parameters_dict['CountingFishMinObjDistance'],
'min_obj_size': parameters_dict['CountingFishMinObjSize'],
'max_obj_size': parameters_dict['CountingFishMaxObjSize'],
'num_peaks_per_label': parameters_dict['CountingFishNumPeaksPerLabel']
}
counts = osmFISH_dots_thr_selection(ImgStack,counting_parameters_dict)
counts.counting_graph()
counts.thr_identification()
if not np.isnan(counts.selected_thr):
dots = osmFISH_dots_mapping(ImgStack,counts.selected_thr,counting_parameters_dict)
if isinstance(dots.intensity_array,np.ndarray):
# Peaks have been identified
total_dots = dots.selected_peaks.shape[0]
dot_id_array = np.array([str(fov)+'_'+str(round_num)+'_'+ channel +'_'+str(nid) for nid in range(total_dots)])
thr_array = np.repeat(counts.selected_thr,total_dots)
counts_dict['r_px_original'] = dots.selected_peaks[:,0]
counts_dict['c_px_original'] = dots.selected_peaks[:,1]
counts_dict['dot_id'] = dot_id_array
counts_dict['dot_intensity'] = dots.intensity_array
counts_dict['selected_thr'] = thr_array
else:
logger.info(f' fov {fov} does not have counts (mapping)')
else:
logger.info(f' fov {fov} does not have counts (thr)')
counts_df = pd.DataFrame(counts_dict)
fov_subdataset_df = pd.DataFrame(fov_subdataset).T
    fov_subdataset_df = pd.concat([fov_subdataset_df]*counts_df.shape[0],axis=0).sort_index().reset_index(drop=True)
    counts_df = pd.concat([counts_df,fov_subdataset_df],axis=1)
    return counts_df
import pandas as pd
df=pd.read_csv("C:/Users/Administrator/Desktop/at1.csv")
import cv2
cam = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier('C:/Users/Administrator/Desktop/haarcascade_frontalface_default.xml')
Id = input('Enter your id:')
Name=input('Enter your name:')
df2 = pd.DataFrame({"Id": [Id], "Name": [Name]})  # assumed completion: one-row frame for the new entry
import subprocess
from pandas.io.json import json_normalize
import pandas as pd
import os
import PIL
import glob
import argparse
import numpy as np
import pandas as pd
from PIL import Image
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from models.vgg import vgg16_bn
from models.inception import inception_v3
from models.resnet import resnet50,resnet152
from models.googleNet import googlenet
from densenet import densenet121, densenet161
from models.incept_resnet_v2 import InceptionResNetV2
from models.inception_v4 import InceptionV4
from models.unet import UNet
from data_util import *
from scipy.misc import imread
from scipy.misc import imresize
import random
import imp
from collections import defaultdict, OrderedDict
import time
import io
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', default=1,
help='gpu ids to use, e.g. 0 1 2 3', type=int)
parser.add_argument('--batch_size', default=4,
help='batch size, e.g. 16, 32, 64...', type=int)
parser.add_argument('--input_dir', default = '/home/shh/Passport/jyz/data/IJCAI/dev_data',
help="data input dir", type=str)
parser.add_argument('--output_dir', default="Out",
help='output dir', type=str)
parser.add_argument('--log_dir',default="./logs/test_search", type=str)
parser.add_argument('--results_file', default='results.csv',type=str)
parser.add_argument('--mode', default="nontarget", type=str)
parser.add_argument('--attack_file', default='attack_tijiao.py', type=str)
parser.add_argument('--if_attack',default=1,type=int)
parser.add_argument('--jpeg_quality',default=70,type=float)
return parser.parse_args()
args = parse_args()
print(args)
def check_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
check_mkdir(args.log_dir)
log_file = "%s/eval.log"%args.log_dir
err_file = "%s/eval.err"%args.log_dir
log_all_file = "%s/all.log"%args.log_dir
err_all_file = "%s/all.err"%args.log_dir
def load_model(model,pth_file,device):
model = model.to(device)
model.load_state_dict(torch.load(pth_file))
return model
def get_model_dics(device, model_list= None):
if model_list is None:
model_list = ['densenet121', 'densenet161', 'resnet50', 'resnet152',
'incept_v1', 'incept_v3', 'inception_v4', 'incept_resnet_v2',
'incept_v4_adv2', 'incept_resnet_v2_adv2',
'black_densenet161','black_resnet50','black_incept_v3',
'old_vgg','old_res','old_incept']
models = {}
for model in model_list:
if model=='densenet121':
models['densenet121'] = densenet121(num_classes=110)
load_model(models['densenet121'],"../pre_weights/ep_38_densenet121_val_acc_0.6527.pth",device)
if model=='densenet161':
models['densenet161'] = densenet161(num_classes=110)
load_model(models['densenet161'],"../pre_weights/ep_30_densenet161_val_acc_0.6990.pth",device)
if model=='resnet50':
models['resnet50'] = resnet50(num_classes=110)
load_model(models['resnet50'],"../pre_weights/ep_41_resnet50_val_acc_0.6900.pth",device)
if model=='incept_v3':
models['incept_v3'] = inception_v3(num_classes=110)
load_model(models['incept_v3'],"../pre_weights/ep_36_inception_v3_val_acc_0.6668.pth",device)
if model=='incept_v1':
models['incept_v1'] = googlenet(num_classes=110)
load_model(models['incept_v1'],"../pre_weights/ep_33_googlenet_val_acc_0.7091.pth",device)
#vgg16 = vgg16_bn(num_classes=110)
#load_model(vgg16, "./pre_weights/ep_30_vgg16_bn_val_acc_0.7282.pth",device)
if model=='incept_resnet_v2':
models['incept_resnet_v2'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2'], "../pre_weights/ep_17_InceptionResNetV2_ori_0.8320.pth",device)
if model=='incept_v4':
models['incept_v4'] = InceptionV4(num_classes=110)
load_model(models['incept_v4'],"../pre_weights/ep_17_InceptionV4_ori_0.8171.pth",device)
if model=='incept_resnet_v2_adv':
models['incept_resnet_v2_adv'] = InceptionResNetV2(num_classes=110)
load_model(models['incept_resnet_v2_adv'], "../pre_weights/ep_22_InceptionResNetV2_val_acc_0.8214.pth",device)
if model=='incept_v4_adv':
models['incept_v4_adv'] = InceptionV4(num_classes=110)
load_model(models['incept_v4_adv'],"../pre_weights/ep_24_InceptionV4_val_acc_0.6765.pth",device)
if model=='incept_resnet_v2_adv2':
models['incept_resnet_v2_adv2'] = InceptionResNetV2(num_classes=110)
#load_model(models['incept_resnet_v2_adv2'], "../test_weights/ep_29_InceptionResNetV2_adv2_0.8115.pth",device)
load_model(models['incept_resnet_v2_adv2'], "../test_weights/ep_13_InceptionResNetV2_val_acc_0.8889.pth",device)
if model=='incept_v4_adv2':
models['incept_v4_adv2'] = InceptionV4(num_classes=110)
# load_model(models['incept_v4_adv2'],"../test_weights/ep_32_InceptionV4_adv2_0.7579.pth",device)
load_model(models['incept_v4_adv2'],"../test_weights/ep_50_InceptionV4_val_acc_0.8295.pth",device)
if model=='resnet152':
models['resnet152'] = resnet152(num_classes=110)
load_model(models['resnet152'],"../pre_weights/ep_14_resnet152_ori_0.6956.pth",device)
if model=='resnet152_adv':
models['resnet152_adv'] = resnet152(num_classes=110)
load_model(models['resnet152_adv'],"../pre_weights/ep_29_resnet152_adv_0.6939.pth",device)
if model=='resnet152_adv2':
models['resnet152_adv2'] = resnet152(num_classes=110)
load_model(models['resnet152_adv2'],"../pre_weights/ep_31_resnet152_adv2_0.6931.pth",device)
if model=='black_resnet50':
models['black_resnet50'] = resnet50(num_classes=110)
load_model(models['black_resnet50'],"../test_weights/ep_0_resnet50_val_acc_0.7063.pth",device)
if model=='black_densenet161':
models['black_densenet161'] = densenet161(num_classes=110)
load_model(models['black_densenet161'],"../test_weights/ep_4_densenet161_val_acc_0.6892.pth",device)
if model=='black_incept_v3':
models['black_incept_v3']=inception_v3(num_classes=110)
load_model(models['black_incept_v3'],"../test_weights/ep_28_inception_v3_val_acc_0.6680.pth",device)
if model=='old_res':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_resnet_v1_50.py")
models['old_res'] = torch.load('./models_old/tf_to_pytorch_resnet_v1_50.pth').to(device)
if model=='old_vgg':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_vgg16.py")
models[model] = torch.load('./models_old/tf_to_pytorch_vgg16.pth').to(device)
if model=='old_incept':
MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_inception_v1.py")
models[model] = torch.load('./models_old/tf_to_pytorch_inception_v1.pth').to(device)
return models
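# Hedged usage sketch: load only a subset of the ensemble on one device. It assumes
# the corresponding weight files referenced above are present on disk.
def _example_load_two_models():
    device = torch.device('cuda:%d' % args.gpu_id if torch.cuda.is_available() else 'cpu')
    return get_model_dics(device, model_list=['resnet50', 'densenet121'])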
def load_data_for_defense(input_dir, batch_size=16): #Only forward
all_img_paths = glob.glob(os.path.join(input_dir, '*.png'))
all_labels = [-1 for i in range(len(all_img_paths))]
dev_data = pd.DataFrame({'image_path':all_img_paths, 'label_idx':all_labels})
transformer = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5]),
])
datasets = {
'dev_data': ImageSet(dev_data, transformer)
}
dataloaders = {
ds: DataLoader(datasets[ds],
batch_size=batch_size,
num_workers=0,
shuffle=False) for ds in datasets.keys()
}
return dataloaders
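# Hedged usage sketch: iterate the defence loader defined above; each batch is a
# dict carrying the 'image' tensors and the matching 'filename' list, which is how
# test_target_attack consumes it further down.
def _example_peek_dev_batch(input_dir):
    loaders = load_data_for_defense(input_dir, args.batch_size)
    for batch in loaders['dev_data']:
        return batch['image'].shape, list(batch['filename'])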
def input_diversity(image, prob, low, high):
if random.random()<prob:
return image
rnd = random.randint(low, high)
rescaled = F.upsample(image, size=[rnd, rnd], mode='bilinear')
h_rem = high - rnd
w_rem = high - rnd
pad_top = random.randint( 0, h_rem)
pad_bottom = h_rem - pad_top
pad_left = random.randint(0, w_rem)
pad_right = w_rem - pad_left
padded = F.pad(rescaled, [pad_top, pad_bottom, pad_left, pad_right], 'constant', 0)
return padded
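# Hedged usage sketch: apply the random resize-and-pad transform above to a batch
# before the forward pass; the 270/299 bounds match the inception-sized branch in
# preprocess below, while prob=0.5 is an illustrative value.
def _example_apply_input_diversity(batch):
    return input_diversity(batch, prob=0.5, low=270, high=299)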
def preprocess(image,model_name="vgg16",prob=1.0):
if "incept_v3" in model_name or model_name[:16]=='incept_resnet_v2' or model_name[:9]=='incept_v4' or model_name=='resnet_152' or model_name=="black_incept_v3":
return input_diversity(image,prob,270,299)
else:
image = F.upsample(image, size=(224, 224), mode='bilinear')
if model_name=="old_res" or model_name=="old_vgg":
image = ((image/2.0)+0.5)*255.0
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
image[:, 0,:, :] = image[:, 0,:, :] - _R_MEAN
image[:, 1,:, :] = image[:, 1,:, :] - _G_MEAN
image[:, 2,:, :] = image[:, 2,:, :] - _B_MEAN
return input_diversity(image,prob,220,224)
else:
return input_diversity(image,prob,220,224)
def check_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def MSE(old_dir, new_dir,filename):
img1 = imread(os.path.join(old_dir,filename)).astype(np.float)
img2 = imread(os.path.join(new_dir,filename)).astype(np.float)
#print(np.sum((img1-img2)**2,axis=2).shape)
mse = np.sqrt(np.sum((img1-img2)**2,axis=2)).mean()
return mse
def images_tojpeg(images,images_new):
    # Round-trip every image through an in-memory JPEG to simulate compression.
    # A fresh, rewound buffer is needed per image, otherwise PIL cannot re-read
    # the bytes it has just written; quality is cast to int for the JPEG encoder.
    #print('1',images.mean())
    for i in range(images.shape[0]):
        buffer = io.BytesIO()
        pil_img = transforms.ToPILImage()(images[i].detach().cpu())
        pil_img.save(buffer, format='jpeg', quality=int(args.jpeg_quality))
        buffer.seek(0)
        images_new[i] = transforms.ToTensor()(Image.open(buffer).convert('RGB'))
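# Hedged usage sketch: round-trip a batch through JPEG as a simple robustness check.
# It assumes the tensors are already in the [0, 1] range expected by ToPILImage.
def _example_jpeg_roundtrip(batch):
    compressed = batch.clone()
    images_tojpeg(batch, compressed)
    return compressed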
def test_target_attack(device, models_dic, old_dir, new_dir, labels_dic, mode):
loader2 = load_data_for_defense(new_dir, args.batch_size)
scores , accuracys= {}, {}
per_score =0
err = 0
old_score,adv_score,black_score = [],[],[]
with torch.no_grad():
for key in models_dic.keys():
model = models_dic[key]
j=0
score = 0
correct = 0
for data in loader2['dev_data']:
image = data["image"].to(device)
filenames = data["filename"]
#images_tojpeg(image,image)
img = preprocess(image,key)
out = model(img)
if 'incept_v3' in key or "incept_v1" in key:
pred = out[0].max(1)[1]
else:
try:
pred = out.max(1)[1]
except:
print("Error!!!!, key",key,img.shape,out.max(1))
for i in range(len(pred)):
mse = MSE(old_dir, new_dir,filenames[i])
err+=mse
if mode=="target" and pred[i].item()!=labels_dic[filenames[i]]:
score+=64
elif mode=="nontarget" and pred[i].item()==labels_dic[filenames[i]]:
score+=64
correct+=1
else:
score+=mse
if mode!='nontarget':
correct+=1
j+=image.shape[0]
print(key)
if 'old' in key:
old_score.append(score/j)
if 'adv' in key:
adv_score.append(score/j)
else:
black_score.append(score/j)
scores[key] = score/j
accuracys[key] = correct/float(j)
per_score+=score/j
err = err/j
print("Test Model %s, acc is %.3f, score is %.3f."%(key, correct/float(j), score/j))
per_score/=len(models_dic)
old_score = np.mean(np.array(old_score))
adv_score = np.mean(np.array(adv_score))
black_score = np.mean(np.array(black_score))
print("Per_score:", per_score)
print("Per score for white model: %.3f"%old_score)
print("score for adv:%.2f"%adv_score)
print('score for black:%.2f'%black_score)
print('err %.3f'%err)
modified_score = old_score*0.4+adv_score*0.4+black_score*0.2
print('Modified score is %.3f'%modified_score)
return scores, accuracys, err, [old_score, adv_score, black_score, modified_score]
def try_str_to_num(str_):
try:
return int(str_)
except:
try:
return float(str_)
except:
return str_
def get_labels(input_dir):
table = pd.read_csv(input_dir+'/dev.csv')
labels = table['trueLabel'].values
filenames = table['filename'].values
labels_dic = {}
for i in range(labels.shape[0]):
labels_dic[filenames[i]] = labels[i]
return labels_dic
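# Hedged usage sketch gluing the pieces above together for an evaluation run. It
# assumes args.input_dir holds the clean images plus dev.csv and args.output_dir
# holds the adversarial images with the same filenames.
def _example_evaluate(device):
    models = get_model_dics(device, model_list=['old_vgg', 'old_res', 'old_incept'])
    labels = get_labels(args.input_dir)
    return test_target_attack(device, models, args.input_dir, args.output_dir,
                              labels, mode=args.mode)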
def get_targets(input_dir):
    table = pd.read_csv(input_dir+'/dev.csv')
#%%
import docx
from datetime import date
import pandas as pd
import numpy as np
from pandas.core.frame import DataFrame
from pandas.core.reshape.merge import merge_ordered
from sklearn.preprocessing import StandardScaler, MinMaxScaler
scale = StandardScaler()
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.metrics import silhouette_score
import pdb
import plotnine
#%%
OMP_NUM_THREADS=1
#%%
#get all the data loaded in and take a look
newslet = pd.read_csv("newsletter_stats.csv", header=0)
spkr_eval = pd.read_csv("speaker_eval.csv",header=0)
spkr_view_stats =pd.read_csv("speaker_viewer_stats.csv", header=0)
# %% data merging and cleaning
#Need to merge all these into one file using the Speaker column
data_merge = newslet.merge(spkr_eval.merge(spkr_view_stats, on='Speaker'),
on='Speaker')
#data_merge.info()
#drop the date column
data_merge = data_merge.drop(['Date'], axis = 1)
# need to remove all % signs and convert those columns to decimals essentially divide by 100.
data_merge = data_merge.replace(to_replace ='%', value = '', regex = True)
#replace missing value in Average Percentage Viewed
data_merge.at[23,'Average Percentage Viewed'] = 15
#data_merge['Average Percentage Viewed'].mean()
convert_dict={'Average Percentage Viewed': float}
data_merge = data_merge.astype(convert_dict)
#%% data scaling
#need to scale everything aside from the average, run the fit first
data_merge.iloc[:,2:36] = scale.fit_transform(data_merge.iloc[:,2:36])
#copy it to clipboard in a excel format so I can put it into a table.
#Try minmax between 0 and 1 instead, this worked better
#mms = MinMaxScaler()
#data_merge.iloc[:,2:36] = mms.fit_transform(data_merge.iloc[:,2:36])
#data_merge.to_clipboard(excel=True)
# %% generating summary stats and gathering strongly agree columns
#Summary Stats
summary_stat = data_merge.describe()
#transpose to make the table more readable
summary_stat = summary_stat.transpose()
#copy it to excel clipboard
summary_stat.to_clipboard(excel=True)
#gathering various columns to
xx = data_merge.loc[:, data_merge.columns.str.startswith('Strongly Agree')]
xy = data_merge.loc[:,'Speaker']
speaker_perc = pd.concat([xy,xx],axis=1, join='inner')
#need to find the average of rows
#%% generating strongly agree average
#generate the means
ave_rate_spk = speaker_perc.mean(axis=0)
speaker_perc['Strongly Agree_5_yt'] = speaker_perc['Strongly Agree_5_yt'].astype(int)
mean_spkr_rate = speaker_perc.mean(axis=1)
speaker_perc = pd.concat([speaker_perc,mean_spkr_rate], axis=1,join='inner')
#copy to clipboard thats ready to paste into excel
speaker_perc.to_clipboard(excel=True)
speaker_perc.rename(columns = {0:"ave"}, inplace=True)
summary_speaker_prec = speaker_perc.describe()
#Focus on the upper quartile (75th percentile), just 5 speakers, in terms of
#average feedback in the strongly agree category.
summary_speaker_prec
#%% Not sure I needed to do any of this...but converting data types from object to float
# Let's do some clustering but need to convert everything to a int, likely need to write a
# function to make that easier.
#start here...need to convert these all to int, to do the clustering
#selecting on column name
agree = data_merge.loc[:, data_merge.columns.str.contains('Agree')]
#creating a index based on column type
sel = agree.select_dtypes(include='object').columns
#passing the index back into the original dataset to change the variable types
data_merge[sel] = data_merge[sel].astype("float")
#average percentage viewed
#data_merge['Average Percentage Viewed'] = data_merge.Average Percentage Viewed.astype(float)
#%%
data_merge.dtypes
#%% Clustering data
cluster_data = data_merge.iloc[:,2:36]
#replaced missing data point and change to a array
cluster_data = cluster_data.to_numpy()
cluster_data[10,29] = .05
#%% clustering algo
init_kmeans = KMeans(
init="random",
n_clusters=7,
n_init=10,
max_iter=300,
random_state=1518
)
#changed the data to a numby array
init_kmeans.fit(cluster_data)
init_kmeans.inertia_
init_kmeans.n_iter_
#%% Need to find the right number of clusters (looks like 6); we don't pass the
#n_clusters argument here because the for loop below supplies it.
kmeans_args = {
"init": "random",
"n_init": 6,
"max_iter": 300,
"random_state": 1518,
}
#frame for the standard error output
sse= []
#simple for loop to run through the options, function would be better but short on time.
for k in range(1, 12):
    kmeans = KMeans(n_clusters=k, **kmeans_args)  # ** unpacks the shared keyword
    # arguments defined above
kmeans.fit(cluster_data)
sse.append(kmeans.inertia_)
# %% checking for missing data (which I had) and infinity (which I didn't)
np.any(np.isnan(cluster_data))#one missing data point so didn't work
#np.all(np.isfinite(cluster_data))
#%%
plt.style.use("fivethirtyeight")
plt.plot(range(1, 12), sse)
plt.xticks(range(1, 12))
plt.xlabel("Number of Clusters")
plt.ylabel("SSE")
plt.show()
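#%% hedged cross-check (added sketch): silhouette_score is imported above but never
# used; this reuses kmeans_args to score each candidate k, assuming cluster_data as
# built in the cells above.
sil_scores = []
for k in range(2, 12):
    km = KMeans(n_clusters=k, **kmeans_args)
    cluster_labels = km.fit_predict(cluster_data)
    sil_scores.append(silhouette_score(cluster_data, cluster_labels))
plt.plot(range(2, 12), sil_scores)
plt.xticks(range(2, 12))
plt.xlabel("Number of Clusters")
plt.ylabel("Mean Silhouette Score")
plt.show()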
#%% Capturing the predicted lables.
label = init_kmeans.fit_predict(cluster_data)
#%% #x cluster no.,y used to generate cluster, z is label index
def label_index_1(x,y,z):
    """Return the rows of y (the data used for clustering) whose predicted
    cluster label in z equals cluster number x."""
    filtered_label = y[z == x]
    return filtered_label
#%%
label_n = []
for k in range(0,7):
output = label_index_1(k,cluster_data,label)
plt.scatter(output[:,0] , output[:,1], label = k)
plt.legend()
# %%
xxx = label_index_1(1,cluster_data,label)
#plt.savefig('speaker_cluster.png')
# %%
label_pd = pd.DataFrame(label)
import sys, os
import unittest
import pandas as pd
import numpy
import sys
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, Imputer, LabelEncoder, LabelBinarizer, MinMaxScaler, MaxAbsScaler, RobustScaler,\
Binarizer, PolynomialFeatures, OneHotEncoder, KBinsDiscretizer
from sklearn_pandas import CategoricalImputer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.svm import SVC, SVR, LinearSVC, LinearSVR, OneClassSVM
from sklearn.neural_network import MLPClassifier, MLPRegressor
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.naive_bayes import GaussianNB
from sklearn_pandas import DataFrameMapper
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier,\
RandomForestRegressor, IsolationForest
from sklearn.linear_model import LinearRegression, LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.gaussian_process import GaussianProcessClassifier
from nyoka.preprocessing import Lag
from nyoka import skl_to_pmml
from nyoka import PMML44 as pml
class TestMethods(unittest.TestCase):
def test_sklearn_01(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "svc_pmml.pmml"
model = SVC()
pipeline_obj = Pipeline([
('svm',model)
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
## 1
svms = pmml_obj.SupportVectorMachineModel[0].SupportVectorMachine
for mod_val, recon_val in zip(model.intercept_, svms):
self.assertEqual("{:.16f}".format(mod_val), "{:.16f}".format(recon_val.Coefficients.absoluteValue))
## 2
svm = pmml_obj.SupportVectorMachineModel[0]
self.assertEqual(svm.RadialBasisKernelType.gamma,model._gamma)
def test_sklearn_02(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data,columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "knn_pmml.pmml"
pipeline_obj = Pipeline([
('scaling',StandardScaler()),
('knn',KNeighborsClassifier(n_neighbors = 5))
])
pipeline_obj.fit(irisd[features],irisd[target])
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertIsNotNone(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.euclidean)
##2
self.assertEqual(pmml_obj.NearestNeighborModel[0].ComparisonMeasure.kind, "distance")
##3
self.assertEqual(pipeline_obj.steps[-1][-1].n_neighbors, pmml_obj.NearestNeighborModel[0].numberOfNeighbors)
def test_sklearn_03(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "rf_pmml.pmml"
model = RandomForestClassifier(n_estimators = 100)
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("rfc", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
self.assertEqual(model.n_estimators,pmml_obj.MiningModel[0].Segmentation.Segment.__len__())
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "majorityVote")
def test_sklearn_04(self):
titanic = pd.read_csv("nyoka/tests/titanic_train.csv")
features = titanic.columns
target = 'Survived'
f_name = "gb_pmml.pmml"
pipeline_obj = Pipeline([
("imp", Imputer(strategy="median")),
("gbc", GradientBoostingClassifier(n_estimators = 10))
])
pipeline_obj.fit(titanic[features],titanic[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
##1
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.multipleModelMethod, "modelChain")
##2
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment.__len__(), 2)
##3
self.assertEqual(pmml_obj.MiningModel[0].Segmentation.Segment[1].RegressionModel.normalizationMethod, "logit")
def test_sklearn_05(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg'],axis=1)
y = df['mpg']
features = [name for name in df.columns if name not in ('mpg')]
target = 'mpg'
pipeline_obj = Pipeline([
('mapper', DataFrameMapper([
('car name', TfidfVectorizer())
])),
('model',DecisionTreeRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"dtr_pmml.pmml")
self.assertEqual(os.path.isfile("dtr_pmml.pmml"),True)
def test_sklearn_06(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
f_name = "linearregression_pmml.pmml"
model = LinearRegression()
pipeline_obj = Pipeline([
('model',model)
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,f_name)
pmml_obj = pml.parse(f_name, True)
## 1
reg_tab = pmml_obj.RegressionModel[0].RegressionTable[0]
self.assertEqual(reg_tab.intercept,model.intercept_)
## 2
for model_val, pmml_val in zip(model.coef_, reg_tab.NumericPredictor):
self.assertEqual("{:.16f}".format(model_val),"{:.16f}".format(pmml_val.coefficient))
def test_sklearn_07(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
f_name = "logisticregression_pmml.pmml"
model = LogisticRegression()
pipeline_obj = Pipeline([
("mapping", DataFrameMapper([
(['sepal length (cm)', 'sepal width (cm)'], StandardScaler()) ,
(['petal length (cm)', 'petal width (cm)'], Imputer())
])),
("lr", model)
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, f_name)
pmml_obj = pml.parse(f_name,True)
## 1
segmentation = pmml_obj.MiningModel[0].Segmentation
self.assertEqual(segmentation.Segment.__len__(), model.classes_.__len__()+1)
## 2
self.assertEqual(segmentation.multipleModelMethod, "modelChain")
##3
self.assertEqual(segmentation.Segment[-1].RegressionModel.normalizationMethod, "simplemax")
##4
for i in range(model.classes_.__len__()):
self.assertEqual(segmentation.Segment[i].RegressionModel.normalizationMethod, "logit")
self.assertEqual("{:.16f}".format(model.intercept_[i]),\
"{:.16f}".format(segmentation.Segment[i].RegressionModel.RegressionTable[0].intercept))
def test_sklearn_08(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = [i%2 for i in range(iris.data.shape[0])]
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
('pca',PCA(2)),
('mod',LogisticRegression())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "logisticregression_pca_pmml.pmml")
self.assertEqual(os.path.isfile("logisticregression_pca_pmml.pmml"),True)
def test_sklearn_09(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdclassifier_pmml.pmml")
self.assertEqual(os.path.isfile("sgdclassifier_pmml.pmml"),True)
def test_sklearn_10(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("lsvc", LinearSVC())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "linearsvc_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvc_pmml.pmml"),True)
def test_sklearn_11(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',LinearSVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"linearsvr_pmml.pmml")
self.assertEqual(os.path.isfile("linearsvr_pmml.pmml"),True)
def test_sklearn_12(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',GradientBoostingRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"gbr.pmml")
self.assertEqual(os.path.isfile("gbr.pmml"),True)
def test_sklearn_13(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("SGD", DecisionTreeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "dtr_clf.pmml")
self.assertEqual(os.path.isfile("dtr_clf.pmml"),True)
def test_sklearn_14(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',RandomForestRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"rfr.pmml")
self.assertEqual(os.path.isfile("rfr.pmml"),True)
def test_sklearn_15(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',KNeighborsRegressor())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"knnr.pmml")
self.assertEqual(os.path.isfile("knnr.pmml"),True)
def test_sklearn_16(self):
df = pd.read_csv('nyoka/tests/auto-mpg.csv')
X = df.drop(['mpg','car name'],axis=1)
y = df['mpg']
features = X.columns
target = 'mpg'
pipeline_obj = Pipeline([
('model',SVR())
])
pipeline_obj.fit(X,y)
skl_to_pmml(pipeline_obj,features,target,"svr.pmml")
self.assertEqual(os.path.isfile("svr.pmml"),True)
def test_sklearn_17(self):
irisdata = datasets.load_iris()
iris = pd.DataFrame(irisdata.data,columns=irisdata.feature_names)
iris['Species'] = irisdata.target
feature_names = iris.columns.drop('Species')
X = iris[iris.columns.drop(['Species'])]
pipeline_obj = Pipeline([
('standard_scaler',StandardScaler()),
('Imputer',Imputer()),
('model',OneClassSVM())
])
pipeline_obj.fit(X)
skl_to_pmml(pipeline_obj, feature_names, pmml_f_name="one_class_svm.pmml")
self.assertEqual(os.path.isfile("one_class_svm.pmml"),True)
def test_sklearn_18(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", GaussianNB())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "gnb.pmml")
self.assertEqual(os.path.isfile("gnb.pmml"),True)
def test_sklearn_19(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", SGDClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "sgdc.pmml")
self.assertEqual(os.path.isfile("sgdc.pmml"),True)
def test_sklearn_20(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", RidgeClassifier())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "ridge.pmml")
self.assertEqual(os.path.isfile("ridge.pmml"),True)
def test_sklearn_21(self):
iris = datasets.load_iris()
irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
irisd['Species'] = iris.target
features = irisd.columns.drop('Species')
target = 'Species'
pipeline_obj = Pipeline([
("model", LinearDiscriminantAnalysis())
])
pipeline_obj.fit(irisd[features], irisd[target])
skl_to_pmml(pipeline_obj, features, target, "lda.pmml")
self.assertEqual(os.path.isfile("lda.pmml"),True)
def test_sklearn_22(self):
iris = datasets.load_iris()
        irisd = pd.DataFrame(iris.data, columns=iris.feature_names)
from collections import abc, deque
from decimal import Decimal
from io import StringIO
from warnings import catch_warnings
import numpy as np
from numpy.random import randn
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
concat,
date_range,
read_csv,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
@pytest.fixture(params=[True, False])
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
class TestConcatenate:
def test_concat_copy(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for b in result._mgr.blocks:
assert b.values.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is df._mgr.blocks[0].values.base
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for b in result._mgr.blocks:
if b.is_float:
assert b.values.base is None
elif b.is_integer:
assert b.values.base is df2._mgr.blocks[0].values.base
elif b.is_object:
assert b.values.base is not None
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
def test_concat_dataframe_keys_bug(self, sort):
t1 = DataFrame(
{"value": Series([1, 2, 3], index=Index(["a", "b", "c"], name="id"))}
)
t2 = DataFrame({"value": Series([7, 8], index=Index(["a", "b"], name="id"))})
# it works
result = concat([t1, t2], axis=1, keys=["t1", "t2"], sort=sort)
assert list(result.columns) == [("t1", "value"), ("t2", "value")]
def test_concat_series_partial_columns_names(self):
# GH10698
foo = Series([1, 2], name="foo")
bar = Series([1, 2])
baz = Series([4, 5])
result = concat([foo, bar, baz], axis=1)
expected = DataFrame(
{"foo": [1, 2], 0: [1, 2], 1: [4, 5]}, columns=["foo", 0, 1]
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, keys=["red", "blue", "yellow"])
expected = DataFrame(
{"red": [1, 2], "blue": [1, 2], "yellow": [4, 5]},
columns=["red", "blue", "yellow"],
)
tm.assert_frame_equal(result, expected)
result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0: [1, 2], 1: [1, 2], 2: [4, 5]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
["a", 1, 4.5, 5.2],
["b", 2, 3.2, 2.2],
["c", 3, 1.2, nan],
],
index=Index(["q", "x", "y", "z"]),
)
if not sort:
expected = expected.loc[["x", "y", "z", "q"]]
tm.assert_frame_equal(v1, expected)
@pytest.mark.parametrize(
"name_in1,name_in2,name_in3,name_out",
[
("idx", "idx", "idx", "idx"),
("idx", "idx", None, None),
("idx", None, None, None),
("idx1", "idx2", None, None),
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Index(["b", "c", "d"], name=name_in2),
Index(["c", "d", "e"], name=name_in3),
]
frames = [
DataFrame({c: [0, 1, 2]}, index=i) for i, c in zip(indices, ["x", "y", "z"])
]
result = pd.concat(frames, axis=1)
exp_ind = Index(["a", "b", "c", "d", "e"], name=name_out)
expected = DataFrame(
{
"x": [0, 1, 2, np.nan, np.nan],
"y": [np.nan, 0, 1, 2, np.nan],
"z": [np.nan, np.nan, 0, 1, 2],
},
index=exp_ind,
)
tm.assert_frame_equal(result, expected)
def test_concat_multiindex_with_keys(self):
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
frame = DataFrame(
np.random.randn(10, 3),
index=index,
columns=Index(["A", "B", "C"], name="exp"),
)
result = concat([frame, frame], keys=[0, 1], names=["iteration"])
assert result.index.names == ("iteration",) + index.names
tm.assert_frame_equal(result.loc[0], frame)
tm.assert_frame_equal(result.loc[1], frame)
assert result.index.nlevels == 3
def test_concat_multiindex_with_none_in_index_names(self):
# GH 15787
index = pd.MultiIndex.from_product([[1], range(5)], names=["level1", None])
df = DataFrame({"col": range(5)}, index=index, dtype=np.int32)
result = concat([df, df], keys=[1, 2], names=["level2"])
index = pd.MultiIndex.from_product(
[[1, 2], [1], range(5)], names=["level2", "level1", None]
)
expected = DataFrame({"col": list(range(5)) * 2}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
result = concat([df, df[:2]], keys=[1, 2], names=["level2"])
level2 = [1] * 5 + [2] * 2
level1 = [1] * 7
no_name = list(range(5)) + list(range(2))
tuples = list(zip(level2, level1, no_name))
index = pd.MultiIndex.from_tuples(tuples, names=["level2", "level1", None])
expected = DataFrame({"col": no_name}, index=index, dtype=np.int32)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
b = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_b"),
)
result = concat([a, b], keys=["key0", "key1"], names=["lvl0", "lvl1"])
exp = concat([a, b], keys=["key0", "key1"], names=["lvl0"])
names = list(exp.index.names)
names[1] = "lvl1"
exp.index.set_names(names, inplace=True)
tm.assert_frame_equal(result, exp)
assert result.index.names == exp.index.names
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_dups_index(self):
# GH 4771
# single dtypes
df = DataFrame(
np.random.randint(0, 10, size=40).reshape(10, 4),
columns=["A", "A", "C", "C"],
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :4], df)
tm.assert_frame_equal(result.iloc[:, 4:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# multi dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result.iloc[:, :6], df)
tm.assert_frame_equal(result.iloc[:, 6:], df)
result = concat([df, df], axis=0)
tm.assert_frame_equal(result.iloc[:10], df)
tm.assert_frame_equal(result.iloc[10:], df)
# append
result = df.iloc[0:8, :].append(df.iloc[8:])
tm.assert_frame_equal(result, df)
result = df.iloc[0:8, :].append(df.iloc[8:9]).append(df.iloc[9:10])
tm.assert_frame_equal(result, df)
expected = concat([df, df], axis=0)
result = df.append(df)
tm.assert_frame_equal(result, expected)
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_handle_empty_objects(self, sort):
df = DataFrame(np.random.randn(10, 4), columns=list("abcd"))
baz = df[:5].copy()
baz["foo"] = "bar"
empty = df[5:5]
frames = [baz, empty, empty, df[5:]]
concatted = concat(frames, axis=0, sort=sort)
expected = df.reindex(columns=["a", "b", "c", "d", "foo"])
expected["foo"] = expected["foo"].astype("O")
expected.loc[0:4, "foo"] = "bar"
tm.assert_frame_equal(concatted, expected)
# empty as first element with time series
# GH3259
df = DataFrame(
dict(A=range(10000)), index=date_range("20130101", periods=10000, freq="s")
)
empty = DataFrame()
result = concat([df, empty], axis=1)
tm.assert_frame_equal(result, df)
result = concat([empty, df], axis=1)
tm.assert_frame_equal(result, df)
result = concat([df, empty])
tm.assert_frame_equal(result, df)
result = concat([empty, df])
tm.assert_frame_equal(result, df)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
tm.assert_frame_equal(result, expected)
def test_empty_dtype_coerce(self):
# xref to #12411
# xref to #12045
# xref to #11594
# see below
# 10571
df1 = DataFrame(data=[[1, None], [2, None]], columns=["a", "b"])
df2 = DataFrame(data=[[3, None], [4, None]], columns=["a", "b"])
result = concat([df1, df2])
expected = df1.dtypes
tm.assert_series_equal(result.dtypes, expected)
def test_dtype_coerceion(self):
# 12411
df = DataFrame({"date": [pd.Timestamp("20130101").tz_localize("UTC"), pd.NaT]})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 12045
import datetime
df = DataFrame(
{"date": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}
)
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
# 11594
df = DataFrame({"text": ["some words"] + [None] * 9})
result = concat([df.iloc[[0]], df.iloc[[1]]])
tm.assert_series_equal(result.dtypes, df.dtypes)
def test_concat_series(self):
ts = tm.makeTimeSeries()
ts.name = "foo"
pieces = [ts[:5], ts[5:15], ts[15:]]
result = concat(pieces)
tm.assert_series_equal(result, ts)
assert result.name == ts.name
result = concat(pieces, keys=[0, 1, 2])
expected = ts.copy()
ts.index = DatetimeIndex(np.array(ts.index.values, dtype="M8[ns]"))
exp_codes = [np.repeat([0, 1, 2], [len(x) for x in pieces]), np.arange(len(ts))]
exp_index = MultiIndex(levels=[[0, 1, 2], ts.index], codes=exp_codes)
expected.index = exp_index
tm.assert_series_equal(result, expected)
def test_concat_series_axis1(self, sort=sort):
ts = tm.makeTimeSeries()
pieces = [ts[:-2], ts[2:], ts[2:-2]]
result = concat(pieces, axis=1)
expected = DataFrame(pieces).T
tm.assert_frame_equal(result, expected)
result = concat(pieces, keys=["A", "B", "C"], axis=1)
expected = DataFrame(pieces, index=["A", "B", "C"]).T
tm.assert_frame_equal(result, expected)
# preserve series names, #2489
s = Series(randn(5), name="A")
s2 = Series(randn(5), name="B")
result = concat([s, s2], axis=1)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
s2.name = None
result = concat([s, s2], axis=1)
tm.assert_index_equal(result.columns, Index(["A", 0], dtype="object"))
# must reindex, #2603
s = Series(randn(3), index=["c", "a", "b"], name="A")
s2 = Series(randn(4), index=["d", "a", "b", "c"], name="B")
result = concat([s, s2], axis=1, sort=sort)
expected = DataFrame({"A": s, "B": s2})
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_names_applied(self):
# ensure names argument is not ignored on axis=1, #23490
s = Series([1, 2, 3])
s2 = Series([4, 5, 6])
result = concat([s, s2], axis=1, keys=["a", "b"], names=["A"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]], columns=Index(["a", "b"], name="A")
)
tm.assert_frame_equal(result, expected)
result = concat([s, s2], axis=1, keys=[("a", 1), ("b", 2)], names=["A", "B"])
expected = DataFrame(
[[1, 4], [2, 5], [3, 6]],
columns=MultiIndex.from_tuples([("a", 1), ("b", 2)], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
def test_concat_single_with_key(self):
df = DataFrame(np.random.randn(10, 4))
result = concat([df], keys=["foo"])
expected = concat([df, df], keys=["foo", "bar"])
tm.assert_frame_equal(result, expected[:10])
def test_concat_exclude_none(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df[:5], None, None, df[5:]]
result = concat(pieces)
tm.assert_frame_equal(result, df)
with pytest.raises(ValueError, match="All objects passed were None"):
concat([None, None])
def test_concat_timedelta64_block(self):
from pandas import to_timedelta
rng = to_timedelta(np.arange(10), unit="s")
df = DataFrame({"time": rng})
result = concat([df, df])
assert (result.iloc[:10]["time"] == rng).all()
assert (result.iloc[10:]["time"] == rng).all()
def test_concat_keys_with_none(self):
# #1649
df0 = DataFrame([[10, 20, 30], [10, 20, 30], [10, 20, 30]])
result = concat(dict(a=None, b=df0, c=df0[:2], d=df0[:1], e=df0))
expected = concat(dict(b=df0, c=df0[:2], d=df0[:1], e=df0))
tm.assert_frame_equal(result, expected)
result = concat(
[None, df0, df0[:2], df0[:1], df0], keys=["a", "b", "c", "d", "e"]
)
expected = concat([df0, df0[:2], df0[:1], df0], keys=["b", "c", "d", "e"])
tm.assert_frame_equal(result, expected)
def test_concat_bug_1719(self):
ts1 = tm.makeTimeSeries()
ts2 = tm.makeTimeSeries()[::2]
# to join with union
# these two are of different length!
left = concat([ts1, ts2], join="outer", axis=1)
right = concat([ts2, ts1], join="outer", axis=1)
assert len(left) == len(right)
def test_concat_bug_2972(self):
ts0 = Series(np.zeros(5))
ts1 = Series(np.ones(5))
ts0.name = ts1.name = "same name"
result = concat([ts0, ts1], axis=1)
expected = DataFrame({0: ts0, 1: ts1})
expected.columns = ["same name", "same name"]
tm.assert_frame_equal(result, expected)
def test_concat_bug_3602(self):
# GH 3602, duplicate columns
df1 = DataFrame(
{
"firmNo": [0, 0, 0, 0],
"prc": [6, 6, 6, 6],
"stringvar": ["rrr", "rrr", "rrr", "rrr"],
}
)
df2 = DataFrame(
{"C": [9, 10, 11, 12], "misc": [1, 2, 3, 4], "prc": [6, 6, 6, 6]}
)
expected = DataFrame(
[
[0, 6, "rrr", 9, 1, 6],
[0, 6, "rrr", 10, 2, 6],
[0, 6, "rrr", 11, 3, 6],
[0, 6, "rrr", 12, 4, 6],
]
)
expected.columns = ["firmNo", "prc", "stringvar", "C", "misc", "prc"]
result = concat([df1, df2], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_inner_join_empty(self):
# GH 15328
df_empty = DataFrame()
df_a = DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
df_expected = DataFrame({"a": []}, index=[], dtype="int64")
for how, expected in [("inner", df_expected), ("outer", df_a)]:
result = pd.concat([df_a, df_empty], axis=1, join=how)
tm.assert_frame_equal(result, expected)
def test_concat_series_axis1_same_names_ignore_index(self):
dates = date_range("01-Jan-2013", "01-Jan-2014", freq="MS")[0:-1]
s1 = Series(randn(len(dates)), index=dates, name="value")
s2 = Series(randn(len(dates)), index=dates, name="value")
result = concat([s1, s2], axis=1, ignore_index=True)
expected = Index([0, 1])
tm.assert_index_equal(result.columns, expected)
def test_concat_iterables(self):
# GH8645 check concat works with tuples, list, generators, and weird
# stuff like deque and custom iterables
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
tm.assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
tm.assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
tm.assert_frame_equal(
concat((df for df in (df1, df2)), ignore_index=True), expected
)
tm.assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1:
def __len__(self) -> int:
return 2
def __getitem__(self, index):
try:
return {0: df1, 1: df2}[index]
except KeyError as err:
raise IndexError from err
tm.assert_frame_equal(pd.concat(CustomIterator1(), ignore_index=True), expected)
class CustomIterator2(abc.Iterable):
def __iter__(self):
yield df1
yield df2
tm.assert_frame_equal(pd.concat(CustomIterator2(), ignore_index=True), expected)
def test_concat_invalid(self):
# trying to concat a ndframe with a non-ndframe
df1 = tm.makeCustomDataframe(10, 2)
for obj in [1, dict(), [1, 2], (1, 2)]:
msg = (
f"cannot concatenate object of type '{type(obj)}'; "
"only Series and DataFrame objs are valid"
)
with pytest.raises(TypeError, match=msg):
concat([df1, obj])
def test_concat_invalid_first_argument(self):
df1 = tm.makeCustomDataframe(10, 2)
df2 = tm.makeCustomDataframe(10, 2)
msg = (
"first argument must be an iterable of pandas "
'objects, you passed an object of type "DataFrame"'
)
with pytest.raises(TypeError, match=msg):
concat(df1, df2)
# generator ok though
concat(DataFrame(np.random.rand(5, 5)) for _ in range(3))
# text reader ok
# GH6583
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
reader = read_csv(StringIO(data), chunksize=1)
result = concat(reader, ignore_index=True)
expected = read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
def test_concat_empty_series(self):
# GH 11082
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], "y": [np.nan, np.nan, np.nan]},
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
s1 = Series([1, 2, 3], name="x")
s2 = Series(name="y", dtype="float64")
res = pd.concat([s1, s2], axis=0)
# name will be reset
exp = Series([1, 2, 3])
tm.assert_series_equal(res, exp)
# empty Series with no name
s1 = Series([1, 2, 3], name="x")
s2 = Series(name=None, dtype="float64")
res = pd.concat([s1, s2], axis=1)
exp = DataFrame(
{"x": [1, 2, 3], 0: [np.nan, np.nan, np.nan]},
columns=["x", 0],
index=Index([0, 1, 2], dtype="O"),
)
tm.assert_frame_equal(res, exp)
@pytest.mark.parametrize("tz", [None, "UTC"])
@pytest.mark.parametrize("values", [[], [1, 2, 3]])
def test_concat_empty_series_timelike(self, tz, values):
# GH 18447
first = Series([], dtype="M8[ns]").dt.tz_localize(tz)
dtype = None if values else np.float64
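        # give an explicit float64 dtype when values is empty so the empty
        # column's dtype is deterministic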
second = Series(values, dtype=dtype)
expected = DataFrame(
{
0: Series([pd.NaT] * len(values), dtype="M8[ns]").dt.tz_localize(tz),
1: values,
}
)
result = concat([first, second], axis=1)
tm.assert_frame_equal(result, expected)
def test_default_index(self):
# is_series and ignore_index
s1 = Series([1, 2, 3], name="x")
s2 = Series([4, 5, 6], name="y")
res = pd.concat([s1, s2], axis=1, ignore_index=True)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
        # use check_index_type=True to check that the result has a
        # RangeIndex (the default index)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_series and all inputs have no names
s1 = Series([1, 2, 3])
s2 = Series([4, 5, 6])
res = pd.concat([s1, s2], axis=1, ignore_index=False)
assert isinstance(res.columns, pd.RangeIndex)
exp = DataFrame([[1, 4], [2, 5], [3, 6]])
exp.columns = pd.RangeIndex(2)
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
# is_dataframe and ignore_index
df1 = DataFrame({"A": [1, 2], "B": [5, 6]})
df2 = DataFrame({"A": [3, 4], "B": [7, 8]})
res = pd.concat([df1, df2], axis=0, ignore_index=True)
exp = DataFrame([[1, 5], [2, 6], [3, 7], [4, 8]], columns=["A", "B"])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
res = pd.concat([df1, df2], axis=1, ignore_index=True)
exp = DataFrame([[1, 5, 3, 7], [2, 6, 4, 8]])
tm.assert_frame_equal(res, exp, check_index_type=True, check_column_type=True)
def test_concat_multiindex_rangeindex(self):
# GH13542
# when multi-index levels are RangeIndex objects
# there is a bug in concat with objects of len 1
df = DataFrame(np.random.randn(9, 2))
df.index = MultiIndex(
levels=[pd.RangeIndex(3), pd.RangeIndex(3)],
codes=[np.repeat(np.arange(3), 3), np.tile(np.arange(3), 3)],
)
res = concat([df.iloc[[2, 3, 4], :], df.iloc[[5], :]])
exp = df.iloc[[2, 3, 4, 5], :]
tm.assert_frame_equal(res, exp)
def test_concat_multiindex_dfs_with_deepcopy(self):
# GH 9967
from copy import deepcopy
example_multiindex1 = pd.MultiIndex.from_product([["a"], ["b"]])
example_dataframe1 = DataFrame([0], index=example_multiindex1)
example_multiindex2 = pd.MultiIndex.from_product([["a"], ["c"]])
example_dataframe2 = DataFrame([1], index=example_multiindex2)
example_dict = {"s1": example_dataframe1, "s2": example_dataframe2}
expected_index = pd.MultiIndex(
levels=[["s1", "s2"], ["a"], ["b", "c"]],
codes=[[0, 1], [0, 0], [0, 1]],
names=["testname", None, None],
)
expected = DataFrame([[0], [1]], index=expected_index)
result_copy = pd.concat(deepcopy(example_dict), names=["testname"])
tm.assert_frame_equal(result_copy, expected)
result_no_copy = pd.concat(example_dict, names=["testname"])
tm.assert_frame_equal(result_no_copy, expected)
def test_categorical_concat_append(self):
cat = Categorical(["a", "b"], categories=["a", "b"])
vals = [1, 2]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical(["a", "b", "a", "b"], categories=["a", "b"])
vals2 = [1, 2, 1, 2]
exp = DataFrame({"cats": cat2, "vals": vals2}, index=Index([0, 1, 0, 1]))
tm.assert_frame_equal(pd.concat([df, df]), exp)
tm.assert_frame_equal(df.append(df), exp)
# GH 13524 can concat different categories
cat3 = Categorical(["a", "b"], categories=["a", "b", "c"])
vals3 = [1, 2]
df_different_categories = DataFrame({"cats": cat3, "vals": vals3})
res = pd.concat([df, df_different_categories], ignore_index=True)
exp = DataFrame({"cats": list("abab"), "vals": [1, 2, 1, 2]})
tm.assert_frame_equal(res, exp)
res = df.append(df_different_categories, ignore_index=True)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_gh7864(self):
# GH 7864
# make sure ordering is preserved
df = DataFrame({"id": [1, 2, 3, 4, 5, 6], "raw_grade": list("abbaae")})
df["grade"] = Categorical(df["raw_grade"])
df["grade"].cat.set_categories(["e", "a", "b"])
df1 = df[0:3]
df2 = df[3:]
tm.assert_index_equal(df["grade"].cat.categories, df1["grade"].cat.categories)
tm.assert_index_equal(df["grade"].cat.categories, df2["grade"].cat.categories)
dfx = pd.concat([df1, df2])
tm.assert_index_equal(df["grade"].cat.categories, dfx["grade"].cat.categories)
dfa = df1.append(df2)
tm.assert_index_equal(df["grade"].cat.categories, dfa["grade"].cat.categories)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
msg = "categories must match existing categories when appending"
with pytest.raises(TypeError, match=msg):
pd.concat([df2, df3])
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_concat_order(self):
# GH 17344
dfs = [DataFrame(index=range(3), columns=["a", 1, None])]
dfs += [DataFrame(index=range(3), columns=[None, 1, "a"]) for i in range(100)]
result = pd.concat(dfs, sort=True).columns
expected = dfs[0].columns
tm.assert_index_equal(result, expected)
def test_concat_different_extension_dtypes_upcasts(self):
a = Series(pd.core.arrays.integer_array([1, 2]))
b = Series(to_decimal([1, 2]))
result = pd.concat([a, b], ignore_index=True)
expected = Series([1, 2, Decimal(1), Decimal(2)], dtype=object)
tm.assert_series_equal(result, expected)
def test_concat_odered_dict(self):
# GH 21510
expected = pd.concat(
[Series(range(3)), Series(range(4))], keys=["First", "Another"]
)
result = pd.concat(
dict([("First", Series(range(3))), ("Another", Series(range(4)))])
)
tm.assert_series_equal(result, expected)
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
@pytest.mark.parametrize("pdt", [Series, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["float"])
def test_concat_no_unnecessary_upcast(dt, pdt):
# GH 13247
dims = pdt(dtype=object).ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], dtype=dt, ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == dt
@pytest.mark.parametrize("pdt", [create_series_with_explicit_dtype, pd.DataFrame])
@pytest.mark.parametrize("dt", np.sctypes["int"])
def test_concat_will_upcast(dt, pdt):
with catch_warnings(record=True):
dims = pdt().ndim
dfs = [
pdt(np.array([1], dtype=dt, ndmin=dims)),
pdt(np.array([np.nan], ndmin=dims)),
pdt(np.array([5], dtype=dt, ndmin=dims)),
]
x = pd.concat(dfs)
assert x.values.dtype == "float64"
def test_concat_empty_and_non_empty_frame_regression():
# GH 18178 regression test
df1 = DataFrame({"foo": [1]})
df2 = DataFrame({"foo": []})
expected = DataFrame({"foo": [1.0]})
result = pd.concat([df1, df2])
tm.assert_frame_equal(result, expected)
def test_concat_empty_and_non_empty_series_regression():
# GH 18187 regression test
s1 = Series([1])
s2 = Series([], dtype=object)
expected = s1
result = pd.concat([s1, s2])
tm.assert_series_equal(result, expected)
def test_concat_sorts_columns(sort):
# GH-4588
df1 = DataFrame({"a": [1, 2], "b": [1, 2]}, columns=["b", "a"])
df2 = DataFrame({"a": [3, 4], "c": [5, 6]})
# for sort=True/None
expected = DataFrame(
{"a": [1, 2, 3, 4], "b": [1, 2, None, None], "c": [None, None, 5, 6]},
columns=["a", "b", "c"],
)
if sort is False:
expected = expected[["b", "a", "c"]]
# default
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], ignore_index=True, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_sorts_index(sort):
df1 = DataFrame({"a": [1, 2, 3]}, index=["c", "a", "b"])
df2 = DataFrame({"b": [1, 2]}, index=["a", "b"])
# For True/None
expected = DataFrame(
{"a": [2, 3, 1], "b": [1, 2, None]}, index=["a", "b", "c"], columns=["a", "b"]
)
if sort is False:
expected = expected.loc[["c", "a", "b"]]
    # sorts by default; no warning expected
with tm.assert_produces_warning(None):
result = pd.concat([df1, df2], axis=1, sort=sort)
tm.assert_frame_equal(result, expected)
def test_concat_inner_sort(sort):
# https://github.com/pandas-dev/pandas/pull/20613
df1 = DataFrame({"a": [1, 2], "b": [1, 2], "c": [1, 2]}, columns=["b", "a", "c"])
df2 = DataFrame({"a": [1, 2], "b": [3, 4]}, index=[3, 4])
with tm.assert_produces_warning(None):
# unset sort should *not* warn for inner join
# since that never sorted
result = pd.concat([df1, df2], sort=sort, join="inner", ignore_index=True)
expected = DataFrame({"b": [1, 2, 3, 4], "a": [1, 2, 1, 2]}, columns=["b", "a"])
if sort is True:
expected = expected[["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort():
# GH-4588
df = DataFrame({"c": [1, 2], "b": [3, 4], "a": [5, 6]}, columns=["c", "b", "a"])
result = pd.concat([df, df], sort=True, ignore_index=True)
expected = DataFrame(
{"a": [5, 6, 5, 6], "b": [3, 4, 3, 4], "c": [1, 2, 1, 2]},
columns=["a", "b", "c"],
)
tm.assert_frame_equal(result, expected)
result = pd.concat([df, df[["c", "b"]]], join="inner", sort=True, ignore_index=True)
expected = expected[["b", "c"]]
tm.assert_frame_equal(result, expected)
def test_concat_aligned_sort_does_not_raise():
# GH-4588
# We catch TypeErrors from sorting internally and do not re-raise.
df = DataFrame({1: [1, 2], "a": [3, 4]}, columns=[1, "a"])
expected = DataFrame({1: [1, 2, 1, 2], "a": [3, 4, 3, 4]}, columns=[1, "a"])
result = pd.concat([df, df], ignore_index=True, sort=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("s1name,s2name", [(np.int64(190), (43, 0)), (190, (43, 0))])
def test_concat_series_name_npscalar_tuple(s1name, s2name):
# GH21015
s1 = Series({"a": 1, "b": 2}, name=s1name)
s2 = Series({"c": 5, "d": 6}, name=s2name)
result = pd.concat([s1, s2])
expected = Series({"a": 1, "b": 2, "c": 5, "d": 6})
tm.assert_series_equal(result, expected)
def test_concat_categorical_tz():
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
result = pd.concat([a, b], ignore_index=True)
expected = Series(
[
pd.Timestamp("2017-01-01", tz="US/Pacific"),
pd.Timestamp("2017-01-02", tz="US/Pacific"),
"a",
"b",
]
)
tm.assert_series_equal(result, expected)
def test_concat_categorical_unchanged():
# GH-12007
    # regression test: concat of a categorical column with a float Series
    # must not coerce the categorical dtype to float
df = DataFrame(Series(["a", "b", "c"], dtype="category", name="A"))
ser = Series([0, 1, 2], index=[0, 1, 3], name="B")
result = pd.concat([df, ser], axis=1)
expected = DataFrame(
{
"A": Series(["a", "b", "c", np.nan], dtype="category"),
"B": Series([0, 1, np.nan, 2], dtype="float"),
}
)
tm.assert_equal(result, expected)
def test_concat_empty_df_object_dtype():
# GH 9149
df_1 = DataFrame({"Row": [0, 1, 1], "EmptyCol": np.nan, "NumberCol": [1, 2, 3]})
df_2 = DataFrame(columns=df_1.columns)
result = pd.concat([df_1, df_2], axis=0)
expected = df_1.astype(object)
tm.assert_frame_equal(result, expected)
def test_concat_sparse():
# GH 23557
a = Series(SparseArray([0, 1, 2]))
expected = DataFrame(data=[[0, 0], [1, 1], [2, 2]]).astype(
pd.SparseDtype(np.int64, 0)
)
result = pd.concat([a, a], axis=1)
tm.assert_frame_equal(result, expected)
def test_concat_dense_sparse():
# GH 30668
a = Series(pd.arrays.SparseArray([1, None]), dtype=float)
b = Series([1], dtype=float)
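    # the dense operand is absorbed into a sparse result dtype here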
expected = Series(data=[1, None, 1], index=[0, 1, 0]).astype(
pd.SparseDtype(np.float64, None)
)
result = pd.concat([a, b], axis=0)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("test_series", [True, False])
def test_concat_copy_index(test_series, axis):
# GH 29879
if test_series:
ser = Series([1, 2])
comb = concat([ser, ser], axis=axis, copy=True)
assert comb.index is not ser.index
else:
df = | DataFrame([[1, 2], [3, 4]], columns=["a", "b"]) | pandas.DataFrame |
import hashlib
import neptune.new as neptune
import pandas as pd
import xgboost as xgb
from neptune.new.integrations.xgboost import NeptuneCallback
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
# (neptune) create run
run = neptune.init(
project="<WORKSPACE/PROJECT>",
name="titanic-training",
tags=["XGBoost"],
)
# load data
data = pd.read_csv("train.csv")
# (neptune) log feature and target names
target = "Survived"
features = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Cabin", "Embarked"]
categorical_feature = ["Pclass", "Sex", "Cabin", "Embarked"]
run["data/target_name"] = target
run["data/features_names"] = features
run["data/categorical_features_names"] = categorical_feature
# (neptune) simple features analysis
women = data.loc[data.Sex == 'female']["Survived"]
run["data/analysis/women_survival_rate"] = sum(women)/len(women)
men = data.loc[data.Sex == 'male']["Survived"]
run["data/analysis/men_survival_rate"] = sum(men)/len(men)
# encode categorical features (OHE)
enc = OneHotEncoder(sparse=False)
enc_data = enc.fit_transform(data[categorical_feature])
data_cat = | pd.DataFrame(enc_data) | pandas.DataFrame |
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
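        # xbox is the container type the boolean comparison result should come back in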
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
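        # equality against a mixed-object array is evaluated elementwise,
        # while ordering comparisons must raise TypeError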
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
        lhs, rhs = pair
        if reverse:
            # add lhs / rhs switched data
            lhs, rhs = rhs, lhs
        left = Series(lhs, dtype=dtype)
        right = box(rhs, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
| tm.assert_equal(NaT != left, expected) | pandas._testing.assert_equal |
# last edited: 04/10/2021
#
# The functions pca_initial, pca_initial_gui, pca_final, and pca_final_gui are adapted
# from a post by <NAME> here:
# https://nirpyresearch.com/classification-nir-spectra-principal-component-analysis-python/
#
# Retrieved in December 2020 and is licensed under Creative Commons Attribution 4.0
# International License. (https://creativecommons.org/licenses/by/4.0/)
#
#
# The function cluster_variance is adapted from a post by <NAME> here:
# https://medium.com/analytics-vidhya/choosing-the-best-k-value-for-k-means-clustering-d8b4616f8b86
#
# Retrieved in December 2020.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA as sk_pca
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from matplotlib import colors as c
def pca_initial(data): # Initial PCA function
# Read the features
feat = (data.values[:, 3:]).astype('float32')
# Initialise
skpca1 = sk_pca(n_components=30)
# Scale the features to have zero mean and standard deviation of 1
# This is important when correlating data with very different variances
nfeat1 = StandardScaler().fit_transform(feat)
# Fit the spectral data and extract the explained variance ratio
X1 = skpca1.fit(nfeat1)
expl_var_1 = X1.explained_variance_ratio_
# create scree plot
fig = plt.figure(dpi=100)
    plt.bar(range(30), expl_var_1 * 100, label="Explained Variance %", color='blue', figure=fig)
    plt.xticks(np.arange(len(expl_var_1)), np.arange(1, len(expl_var_1) + 1))
    plt.plot(np.cumsum(expl_var_1) * 100, '-o', label='Cumulative variance %', color='green', figure=fig)
plt.xlabel('PC Number')
plt.legend()
return fig
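# Hypothetical usage, assuming the expected layout (metadata in the first three
# columns, spectra from column 4 onwards); filenames are illustrative only:
#   df = pd.read_csv('spectra.csv')
#   fig = pca_initial(df)
#   fig.savefig('scree_plot.png')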
def pca_initial_gui(data): # Initial PCA function (no standardscaler)
feat = (data.values[:, 3:]).astype('float64')
ncom = 30
# Initialise
skpca1 = sk_pca(n_components=ncom)
    # Scaling to zero mean / unit variance is intentionally skipped in this
    # variant (see the "no standardscaler" note above); the features are used as-is
    # nfeat1 = StandardScaler().fit_transform(feat)
# Fit the spectral data and extract the explained variance ratio
X1 = skpca1.fit(feat)
expl_var_1 = X1.explained_variance_ratio_
# create scree plot
fig = plt.figure(dpi=100, figsize=(10,5))
plt.bar(range(30), expl_var_1*100, label="Explained Variance %", color='blue', figure=fig)
plt.xticks(np.arange(len(expl_var_1)), np.arange(1, len(expl_var_1) + 1))
plt.plot(np.cumsum(expl_var_1)*100, '-o', label='Cumulative variance %', color='green', figure=fig)
plt.xlabel('PC Number')
plt.ylabel('Explained Variance (%)')
plt.legend()
return fig
def pca_final(data, ncomp): # PCA fitting with scores as result
# Read the features
feat = (data.values[:, 3:]).astype('float32')
    # Scale the features to have zero mean and standard deviation of 1
    # This is important when correlating data with very different variances
nfeat1 = StandardScaler().fit_transform(feat)
skpca1 = sk_pca(n_components=ncomp)
# Transform on the scaled features
Xt1 = skpca1.fit_transform(nfeat1)
scores = pd.DataFrame(Xt1)
return scores
def pca_final_gui(data, ncomp): # PCA fitting with scores as result (no standardscaler)
# Read the features
feat = (data.values[:, 3:]).astype('float32')
    # No scaling is applied in this variant (see the "no standardscaler" note);
    # the features are passed to the PCA as-is
skpca1 = sk_pca(n_components=ncomp)
# Transform on the scaled features
Xt1 = skpca1.fit_transform(feat)
scores = | pd.DataFrame(Xt1) | pandas.DataFrame |
import glob
import pandas as pd
import datetime
import re
from constants import CAT_TO_SUBCAT, DATA_PATH_PATTERN
def get_expenses_df():
expenses = read_newest_csv()
expenses = clean_df(expenses)
expenses = aggregate_categories(expenses)
expenses = expenses.sort_values('Date', ascending=False)
print('cleaned and preprocessed data:')
print(expenses.head())
return expenses
def read_newest_csv():
""" Returns the newest splitwise-exported csv data as a dataframe,
assuming the splitwise date pattern in csv-filenames
"""
file_paths = glob.glob(DATA_PATH_PATTERN)
date_pattern = r'\d{4}-\d{2}-\d{2}'
dates = [re.search(date_pattern, path).group() for path in file_paths]
datetime_pattern = "%Y-%m-%d"
datetimes = [datetime.datetime.strptime(
date, datetime_pattern) for date in dates]
newest_file_index = datetimes.index(max(datetimes))
newest_file_path = file_paths[newest_file_index]
expenses = pd.read_csv(
newest_file_path, index_col='Date', parse_dates=True)
print("Read Splitwise data from ", newest_file_path)
return expenses
def clean_df(expenses):
""" select columns, delete balance and payment rows, convert types as needed
"""
columns_to_keep = ['Description', 'Category', 'Cost', 'Currency']
cleaned = expenses.filter(columns_to_keep)
cleaned = cleaned[cleaned['Category'] != 'Payment']
cleaned = cleaned[cleaned['Description'] != 'Total balance']
cleaned.Cost = | pd.to_numeric(cleaned.Cost, errors='coerce') | pandas.to_numeric |
import logging
import pandas as pd
import requests
import io
import re
import datetime
def create_elo_dict(db):
elo_dict = pd.read_csv('data/raw/elo_dictionary.csv', sep=';')[['fd.name', 'elo.name']]
elo_dict = elo_dict.rename(columns={'fd.name':'fd_name', 'elo.name':'elo_name'})
elo_dict['updated_untill'] = pd.to_datetime(None)
elo_dict.to_sql(name='elo_master', con=db, if_exists='replace', index=False)
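    # copy the matching ClubElo name onto each team row via a correlated subquery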
db.execute("""update teams set elo_name = (
select `elo_name` from elo_master where teams.long_name = elo_master.`fd_name`)""")
def create_elo_scores(db):
logger = logging.getLogger(__name__)
teams = pd.read_sql("""select team_id, elo_name from teams""", db)
for idx, team in teams.iterrows():
try:
url = "http://api.clubelo.com/"+re.sub(' ', '', team.elo_name)
logger.info(url.strip())
s = requests.get(url).content
eloRank = pd.read_csv(io.StringIO(s.decode('utf-8')))
eloRank.From = | pd.to_datetime(eloRank.From) | pandas.to_datetime |
#!/usr/bin/env python
import pandas as pd
import click
from bokeh.io import vform
from bokeh.plotting import figure, show, output_file
from bokeh.models import CustomJS, ColumnDataSource
from bokeh.models.widgets import Select
from bokeh.palettes import (Blues9, BrBG9, BuGn9, BuPu9, GnBu9, Greens9,
Greys9, OrRd9, Oranges9, PRGn9, PiYG9, PuBu9,
PuBuGn9, PuOr9, PuRd9, Purples9, RdBu9, RdGy9,
RdPu9, RdYlBu9, RdYlGn9, Reds9, Spectral9, YlGn9,
YlGnBu9, YlOrBr9, YlOrRd9)
standard_palettes = dict([("Blues", Blues9), ("BrBG", BrBG9),
("BuGn", BuGn9), ("BuPu", BuPu9),
("GnBu", GnBu9), ("Greens", Greens9),
("Greys", Greys9), ("OrRd", OrRd9),
("Oranges", Oranges9), ("PRGn", PRGn9),
("PiYG", PiYG9), ("PuBu", PuBu9),
("PuBuGn", PuBuGn9), ("PuOr", PuOr9),
("PuRd", PuRd9), ("Purples", Purples9),
("RdBu", RdBu9), ("RdGy", RdGy9),
("RdPu", RdPu9), ("RdYlBu", RdYlBu9),
("RdYlGn", RdYlGn9), ("Reds", Reds9),
("Spectral", Spectral9), ("YlGn", YlGn9),
("YlGnBu", YlGnBu9), ("YlOrBr", YlOrBr9),
("YlOrRd", YlOrRd9)])
@click.command()
@click.option('--input_fp', '-i', type=click.Path(
exists=True, dir_okay=False, readable=True, resolve_path=True),
help='Input metadata file')
@click.option('--output_fp', '-o', type=click.Path(
dir_okay=False, writable=True, resolve_path=True),
help='Output filepath')
@click.option('--title', '-t', type=str, help='Title of the graph')
@click.option('--seperator', '-s', required=False, type=str, default=',',
              help='Separator on the file (Default ",")')
@click.option('--color_by', '-c', required=False, type=str,
help='Column name to color the data by (Default None)')
@click.option('--palette', '-p', required=False,
help='Color palette to use, or string in the form '
'group1:color,group2:color\n'
                   'Possible palettes:\n' + ', '.join(standard_palettes.keys()))
@click.option('--legend_pos', '-l', required=False, default='top_right',
type=click.Choice(
['top_right', 'top_left', 'bottom_right', 'bottom_left']),
              help='Position of the legend (Default "top_right")')
@click.argument('remcols', nargs=-1, type=str)
def build_bokeh(input_fp, output_fp, title, seperator=',', remcols=None,
color_by=None, palette=None, legend_pos='top_right'):
data = pd.DataFrame.from_csv(input_fp, sep=seperator)
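    # note: DataFrame.from_csv was removed in pandas 1.0; the equivalent on newer
    # pandas is pd.read_csv(input_fp, sep=seperator, index_col=0, parse_dates=True)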
# Put depth as first column
cols = data.columns.tolist()
popped = cols.pop(7)
cols.insert(0, popped)
data = data[cols]
# Change odd depths to regular ones
data['Depth (m)'].replace(47, 50, inplace=True)
#data['Depth (m)'].replace(258, 200, inplace=True)
# Drop unwanted columns if needed
if remcols:
remcols = list(remcols)
data.drop(remcols, axis=1, inplace=True)
# Build out the colors for the graph if needed
legend = []
if color_by is not None:
groups = data.groupby(color_by).groups
group_list = sorted(groups.keys(), reverse=True)
# Grab colormap or use provided
if len(palette.split(',')) > 1:
# Format into colormap-like object
p = palette.split(',')
hold = dict(x.split(':') for x in p)
colormap = [hold[str(g)] for g in group_list]
elif len(groups) > 9:
raise ValueError("Can only support up to 9 groups, "
"%s has %d groups" % color_by, len(groups))
else:
colormap = standard_palettes[palette]
# Build colormap
index = []
colors = []
for group_num, group in enumerate(group_list):
vals = groups[group]
index.extend(vals)
colors.extend([colormap[group_num]] * len(vals))
# build legend colors list
legend.append((str(group), colormap[group_num]))
data['colormap'] = | pd.Series(colors, index=index) | pandas.Series |
import torch
import torch.nn as nn
from torchvision import transforms as TF
from models.NIMA_model.nima import NIMA
import argparse
import os
from PIL import Image
import pandas as pd
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
transforms = TF.Compose([
TF.Resize((224,224)),
TF.ToTensor(),
TF.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
def visualize_test(img, new_img_path, mean_score, std_score):
mean_score = np.round(mean_score, 3)
std_score = np.round(std_score, 3)
title = f'{mean_score} ~ {std_score}'
plt.title(title)
plt.axis('off')
plt.imshow(img)
plt.savefig(new_img_path)
def plot_score(scores_list, output_path, figsize = (15,15)):
cnt_dict = {}
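    # bin scores by their integer part and count how many images fall in each bin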
for score in scores_list:
if int(score) not in cnt_dict.keys():
cnt_dict[int(score)] = 1
else:
cnt_dict[int(score)] += 1
cnt_dict = {k: v for k, v in sorted(cnt_dict.items(), key=lambda item: item[0])}
fig = plt.figure(figsize = figsize)
plt.plot(list(cnt_dict.keys()), list(cnt_dict.values()))
plt.xlabel('Scores')
plt.ylabel('Number of images')
plt.title('Score distribution')
plt.savefig(os.path.join(output_path, f'distribution.png'))
plt.close(fig)
def eval(args):
device = torch.device("cuda" if args.cuda else "cpu")
if args.images is not None:
input_path, output_path = args.images.split(':')
if not os.path.exists(output_path):
os.mkdir(output_path)
model = NIMA().to(device)
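        # (assumption) no checkpoint is loaded in this excerpt; put the network in
        # inference mode so dropout/batch-norm layers behave deterministically
        model.eval()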
data = {
'image name':[],
'mean scores':[],
'std scores': []}
img_paths = os.listdir(input_path)
with torch.no_grad():
for img_path in tqdm(img_paths):
old_img_path = os.path.join(input_path, img_path)
new_img_path = os.path.join(output_path, img_path)
img = Image.open(old_img_path)
img_tensor = transforms(img).unsqueeze(0).to(device)
score = model(img_tensor)
mean_score = score['mean']
std_score = score['std']
data['image name'].append(img_path)
data['mean scores'].append(mean_score)
data['std scores'].append(std_score)
visualize_test(img, new_img_path, mean_score, std_score)
if args.csv_output is not None:
df = | pd.DataFrame(data) | pandas.DataFrame |
"""Unit tests for track_reanalysis.py."""
import copy
import unittest
import numpy
import pandas
from gewittergefahr.gg_utils import track_reanalysis
from gewittergefahr.gg_utils import temporal_tracking
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
TOLERANCE = 1e-6
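# Each visual row of the arrays below holds one storm cell's values; the row
# groups line up with the primary IDs (A through J) defined further down.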
ORIG_X_COORDS_METRES = numpy.array([
20, 24, 28, 32,
36, 40, 44,
36, 40, 44, 48,
10, 15, 20, 25, 30, 35,
8, 13, 18, 23, 28, 33,
40, 41,
20, 22, 24, 26, 28, 30, 32,
0, 5, 10, 15, 20,
25, 28, 31, 34, 37, 40
], dtype=float)
ORIG_Y_COORDS_METRES = numpy.array([
100, 101, 102, 103,
105, 110, 115,
103, 101, 99, 97,
70, 70, 70, 70, 70, 70,
55, 57, 59, 61, 63, 65,
67, 68,
30, 32, 34, 36, 38, 40, 42,
0, -7.5, -15, -22.5, -30,
-30, -31.5, -33, -34.5, -36, -37.5
])
ORIG_X_VELOCITIES_M_S01 = numpy.array([
4, 4, 4, 4,
4, 4, 4,
4, 4, 4, 4,
5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5, 5,
1, 1,
2, 2, 2, 2, 2, 2, 2,
5, 5, 5, 5, 5,
3, 3, 3, 3, 3, 3
], dtype=float)
ORIG_Y_VELOCITIES_M_S01 = numpy.array([
1, 1, 1, 1,
5, 5, 5,
-2, -2, -2, -2,
0, 0, 0, 0, 0, 0,
2, 2, 2, 2, 2, 2,
1, 1,
2, 2, 2, 2, 2, 2, 2,
-7.5, -7.5, -7.5, -7.5, -7.5,
-1.5, -1.5, -1.5, -1.5, -1.5, -1.5
])
ORIG_TIMES_UNIX_SEC = numpy.array([
0, 1, 2, 3,
5, 6, 7,
5, 6, 7, 8,
2, 3, 4, 5, 6, 7,
2, 3, 4, 5, 6, 7,
9, 10,
3, 4, 5, 6, 7, 8, 9,
5, 6, 7, 8, 9,
12, 13, 14, 15, 16, 17
], dtype=int)
ORIG_PRIMARY_ID_STRINGS = [
'A', 'A', 'A', 'A',
'B', 'B', 'B',
'C', 'C', 'C', 'C',
'D', 'D', 'D', 'D', 'D', 'D',
'E', 'E', 'E', 'E', 'E', 'E',
'F', 'F',
'G', 'G', 'G', 'G', 'G', 'G', 'G',
'H', 'H', 'H', 'H', 'H',
'J', 'J', 'J', 'J', 'J', 'J'
]
ORIG_SECONDARY_ID_STRINGS = [
'01', '01', '01', '01',
'02', '02', '02',
'03', '03', '03', '03',
'04', '04', '04', '04', '04', '04',
'05', '05', '05', '05', '05', '05',
'06', '06',
'07', '07', '07', '07', '07', '07', '07',
'08', '08', '08', '08', '08',
'10', '10', '10', '10', '10', '10'
]
ORIG_FIRST_PREV_SECONDARY_IDS = [
'', '01', '01', '01',
'', '02', '02',
'', '03', '03', '03',
'', '04', '04', '04', '04', '04',
'', '05', '05', '05', '05', '05',
'', '06',
'', '07', '07', '07', '07', '07', '07',
'', '08', '08', '08', '08',
'', '10', '10', '10', '10', '10'
]
ORIG_SECOND_PREV_SECONDARY_IDS = [''] * len(ORIG_TIMES_UNIX_SEC)
ORIG_FIRST_NEXT_SECONDARY_IDS = [
'01', '01', '01', '',
'02', '02', '',
'03', '03', '03', '',
'04', '04', '04', '04', '04', '',
'05', '05', '05', '05', '05', '',
'06', '',
'07', '07', '07', '07', '07', '07', '',
'08', '08', '08', '08', '',
'10', '10', '10', '10', '10', ''
]
ORIG_SECOND_NEXT_SECONDARY_IDS = [''] * len(ORIG_TIMES_UNIX_SEC)
THIS_DICT = {
temporal_tracking.CENTROID_X_COLUMN: ORIG_X_COORDS_METRES,
temporal_tracking.CENTROID_Y_COLUMN: ORIG_Y_COORDS_METRES,
temporal_tracking.X_VELOCITY_COLUMN: ORIG_X_VELOCITIES_M_S01,
temporal_tracking.Y_VELOCITY_COLUMN: ORIG_Y_VELOCITIES_M_S01,
tracking_utils.VALID_TIME_COLUMN: ORIG_TIMES_UNIX_SEC,
tracking_utils.PRIMARY_ID_COLUMN: ORIG_PRIMARY_ID_STRINGS,
tracking_utils.SECONDARY_ID_COLUMN: ORIG_SECONDARY_ID_STRINGS,
tracking_utils.FIRST_PREV_SECONDARY_ID_COLUMN:
ORIG_FIRST_PREV_SECONDARY_IDS,
tracking_utils.SECOND_PREV_SECONDARY_ID_COLUMN:
ORIG_SECOND_PREV_SECONDARY_IDS,
tracking_utils.FIRST_NEXT_SECONDARY_ID_COLUMN:
ORIG_FIRST_NEXT_SECONDARY_IDS,
tracking_utils.SECOND_NEXT_SECONDARY_ID_COLUMN:
ORIG_SECOND_NEXT_SECONDARY_IDS
}
ORIG_STORM_OBJECT_TABLE = | pandas.DataFrame.from_dict(THIS_DICT) | pandas.DataFrame.from_dict |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series([Timestamp('20130101 9:06:00.005'), Timestamp(
'20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series([Timestamp('20130101 9:01:01'), Timestamp(
'20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
s + op(5)
op(5) + s
def test_timedelta_series_ops(self):
# GH11925
s = Series(timedelta_range('1 day', periods=3))
ts = Timestamp('2012-01-01')
expected = Series(date_range('2012-01-02', periods=3))
assert_series_equal(ts + s, expected)
assert_series_equal(s + ts, expected)
expected2 = Series(date_range('2011-12-31', periods=3, freq='-1D'))
assert_series_equal(ts - s, expected2)
assert_series_equal(ts + (-s), expected2)
def test_timedelta64_operations_with_DateOffset(self):
# GH 10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
assert_series_equal(result, expected)
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3), timedelta(
minutes=5, seconds=6), timedelta(hours=2, minutes=5, seconds=3)])
assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) - Series([timedelta(
seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) - Series([timedelta(
seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2, td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result, expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
for dtype in ['int32', 'int16', 'uint32', 'uint64', 'uint32', 'uint16',
'uint8']:
s2 = Series([20, 30, 40], dtype=dtype)
expected = Series(
s1.values.astype(np.int64) * s2.astype(np.int64),
dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result, expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result, expected)
# invalid ops
assert_series_equal(s1 / s2.astype(float),
Series([Timedelta('2 days 22:48:00'), Timedelta(
'1 days 23:12:00'), Timedelta('NaT')]))
assert_series_equal(s1 / 2.0,
Series([Timedelta('29 days 12:00:00'), Timedelta(
'29 days 12:00:00'), Timedelta('NaT')]))
for op in ['__add__', '__sub__']:
sop = getattr(s1, op, None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D', 'h', 'm', 's', 'ms', 'us', 'ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m, unit))
result = s1 / np.timedelta64(m, unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(
lambda x: Timedelta(np.timedelta64(m, unit)) / x)
result = np.timedelta64(m, unit) / s1
# astype
s = Series(date_range('20130101', periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0], datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0], timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'), Timestamp(
'20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
# TODO: unused
# npy16_mappings = {'D': 24 * 60 * 60 * 1000000,
# 'h': 60 * 60 * 1000000,
# 'm': 60 * 1000000,
# 's': 1000000,
# 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except Exception:
raise AssertionError(
"invalid comparison [op->{0},d->{1},h->{2},m->{3},"
"s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s,
us, lhs, rhs))
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
for op_str in ops:
op = getattr(get_ser, op_str, None)
with tm.assertRaisesRegexp(TypeError, 'operate'):
op(test_ser)
# ## timedelta64 ###
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td2 = timedelta(minutes=5, seconds=4)
ops = ['__mul__', '__floordiv__', '__pow__', '__rmul__',
'__rfloordiv__', '__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
# ## datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt1.iloc[2] = np.nan
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
# ## datetime64 with timedelta64 ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
# ## timedelta64 with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rmul__', '__rfloordiv__', '__rtruediv__',
'__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
# 8260, 10763
# datetime64 with tz
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
tz = 'US/Eastern'
dt1 = Series(date_range('2000-01-01 09:00:00', periods=5,
tz=tz), name='foo')
dt2 = dt1.copy()
dt2.iloc[2] = np.nan
td1 = Series(timedelta_range('1 days 1 min', periods=5, freq='H'))
td2 = td1.copy()
td2.iloc[1] = np.nan
run_ops(ops, dt1, td1)
result = dt1 + td1[0]
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2[0]
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
# odd numpy behavior with scalar timedeltas
if not _np_version_under1p8:
result = td1[0] + dt1
exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = td2[0] + dt2
exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1[0]
exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1[0] - dt1)
result = dt2 - td2[0]
exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td2[0] - dt2)
result = dt1 + td1
exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 + td2
exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt1 - td1
exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz)
assert_series_equal(result, exp)
result = dt2 - td2
exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz)
assert_series_equal(result, exp)
self.assertRaises(TypeError, lambda: td1 - dt1)
self.assertRaises(TypeError, lambda: td2 - dt2)
def test_sub_single_tz(self):
# GH12290
s1 = Series([pd.Timestamp('2016-02-10', tz='America/Sao_Paulo')])
s2 = Series([pd.Timestamp('2016-02-08', tz='America/Sao_Paulo')])
result = s1 - s2
expected = Series([Timedelta('2days')])
assert_series_equal(result, expected)
result = s2 - s1
expected = Series([Timedelta('-2days')])
assert_series_equal(result, expected)
def test_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
datetime_series = Series([NaT, Timestamp('19900315')])
nat_series_dtype_timedelta = Series(
[NaT, NaT], dtype='timedelta64[ns]')
nat_series_dtype_timestamp = Series([NaT, NaT], dtype='datetime64[ns]')
single_nat_dtype_datetime = Series([NaT], dtype='datetime64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta)
assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(datetime_series - NaT, nat_series_dtype_timestamp)
assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp)
assert_series_equal(datetime_series - single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + datetime_series
assert_series_equal(datetime_series - single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta + datetime_series,
nat_series_dtype_timestamp)
# without a Series wrapping the NaT, it is ambiguous
# whether it is a datetime64 or timedelta64
# defaults to interpreting it as timedelta64
assert_series_equal(nat_series_dtype_timestamp - NaT,
nat_series_dtype_timestamp)
assert_series_equal(-NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_datetime,
nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
-single_nat_dtype_datetime + nat_series_dtype_timestamp
assert_series_equal(nat_series_dtype_timestamp -
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(-single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
with tm.assertRaises(TypeError):
timedelta_series - single_nat_dtype_datetime
# addition
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta)
assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta)
assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timestamp + NaT,
nat_series_dtype_timestamp)
assert_series_equal(NaT + nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timestamp +
single_nat_dtype_timedelta,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timestamp,
nat_series_dtype_timestamp)
assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_datetime,
nat_series_dtype_timestamp)
assert_series_equal(single_nat_dtype_datetime +
nat_series_dtype_timedelta,
nat_series_dtype_timestamp)
# multiplication
assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
assert_series_equal(timedelta_series * 1, timedelta_series)
assert_series_equal(1 * timedelta_series, timedelta_series)
assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
assert_series_equal(timedelta_series * nan, nat_series_dtype_timedelta)
assert_series_equal(nan * timedelta_series, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
datetime_series * 1
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1
with tm.assertRaises(TypeError):
datetime_series * 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp * 1.0
# division
assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
assert_series_equal(timedelta_series / nan, nat_series_dtype_timedelta)
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1.0
with tm.assertRaises(TypeError):
nat_series_dtype_timestamp / 1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_tuples(self):
# GH11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
# TODO: the reversed comparisons below (scalar on the left) do not
# behave as expected yet, so they are left disabled
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s, s2), (s2, s)]:
self.assertRaises(TypeError, lambda: x == y)
self.assertRaises(TypeError, lambda: x != y)
self.assertRaises(TypeError, lambda: x >= y)
self.assertRaises(TypeError, lambda: x > y)
self.assertRaises(TypeError, lambda: x < y)
self.assertRaises(TypeError, lambda: x <= y)
def test_more_na_comparisons(self):
for dtype in [None, object]:
left = Series(['a', np.nan, 'c'], dtype=dtype)
right = Series(['a', np.nan, 'd'], dtype=dtype)
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_nat_comparisons(self):
data = [([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')]),
([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')],
[pd.NaT, pd.NaT, pd.Timedelta('3 days')]),
([pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')],
[pd.NaT, pd.NaT, pd.Period('2011-03', freq='M')])]
# add lhs / rhs switched data
data = data + [(r, l) for l, r in data]
for l, r in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
# Series, Index
for right in [Series(r, dtype=dtype), Index(r, dtype=dtype)]:
expected = Series([False, False, True])
assert_series_equal(left == right, expected)
expected = Series([True, True, False])
assert_series_equal(left != right, expected)
expected = Series([False, False, False])
assert_series_equal(left < right, expected)
expected = Series([False, False, False])
assert_series_equal(left > right, expected)
expected = Series([False, False, True])
assert_series_equal(left >= right, expected)
expected = Series([False, False, True])
assert_series_equal(left <= right, expected)
def test_nat_comparisons_scalar(self):
data = [[pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')],
[pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')],
[pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]]
for l in data:
for dtype in [None, object]:
left = Series(l, dtype=dtype)
expected = Series([False, False, False])
assert_series_equal(left == pd.NaT, expected)
assert_series_equal(pd.NaT == left, expected)
expected = Series([True, True, True])
assert_series_equal(left != pd.NaT, expected)
assert_series_equal(pd.NaT != left, expected)
expected = Series([False, False, False])
assert_series_equal(left < pd.NaT, expected)
assert_series_equal(pd.NaT > left, expected)
assert_series_equal(left <= pd.NaT, expected)
assert_series_equal(pd.NaT >= left, expected)
assert_series_equal(left > pd.NaT, expected)
assert_series_equal(pd.NaT < left, expected)
assert_series_equal(left >= pd.NaT, expected)
assert_series_equal(pd.NaT <= left, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([False, True, False], list('abc'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False], list('abc'))
result = a | b
assert_series_equal(result, expected)
expected = Series([True, False, False], list('abc'))
result = a ^ b
assert_series_equal(result, expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([False, True, False, False], list('abcd'))
result = a & b
assert_series_equal(result, expected)
expected = Series([True, True, False, False], list('abcd'))
result = a | b
assert_series_equal(result, expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result, expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result, expected)
# vs non-matching
result = a & Series([1], ['z'])
expected = Series([False, False, False, False], list('abcz'))
assert_series_equal(result, expected)
result = a | Series([1], ['z'])
expected = Series([True, True, False, False], list('abcz'))
assert_series_equal(result, expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]), Series([1], ['z']),
Series(np.nan, b.index), Series(np.nan, a.index)]:
result = a[a | e]
assert_series_equal(result, a[a])
for e in [Series(['z'])]:
if compat.PY3:
with tm.assert_produces_warning(RuntimeWarning):
result = a[a | e]
else:
result = a[a | e]
assert_series_equal(result, a[a])
# vs scalars
index = list('bca')
t = Series([True, False, True])
for v in [True, 1, 2]:
result = Series([True, False, True], index=index) | v
expected = Series([True, True, True], index=index)
assert_series_equal(result, expected)
for v in [np.nan, 'foo']:
self.assertRaises(TypeError, lambda: t | v)
for v in [False, 0]:
result = Series([True, False, True], index=index) | v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [True, 1]:
result = Series([True, False, True], index=index) & v
expected = Series([True, False, True], index=index)
assert_series_equal(result, expected)
for v in [False, 0]:
result = Series([True, False, True], index=index) & v
expected = Series([False, False, False], index=index)
assert_series_equal(result, expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda: t & v)
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, 'index']:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
# invalid axis
msg = 'No axis named 1 for object type'
for op in ['eq', 'ne', 'le', 'lt', 'gt', 'ge']:
with tm.assertRaisesRegexp(ValueError, msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list('abcd'))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list('abcd'))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list('abcd'))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list('abc'))
right = Series([2, 2, 2], index=list('bcd'))
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list('abcd'))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list('abcd'))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list('abcd'))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list('abcd'))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8), dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff.reindex(list('abc'))
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft.reindex(list('abc'))
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1] * 4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a', 'b', 'c', 'd'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=['b', 'c', 'a', 0, 1, 2, 3])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_tft & s_0123, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_tft & s_0123, exp)
# s_tft will be all false now because of reindexing like s_0123
if compat.PY3:
# unable to sort incompatible object via .union.
exp = Series([False] * 7, index=[0, 1, 2, 3, 'b', 'c', 'a'])
with tm.assert_produces_warning(RuntimeWarning):
assert_series_equal(s_0123 & s_tft, exp)
else:
exp = Series([False] * 7, index=[0, 1, 2, 3, 'a', 'b', 'c'])
assert_series_equal(s_0123 & s_tft, exp)
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]),
Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a', 'b', np.NaN, 'd'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True, index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
# TODO: fix this exception (see GH5035)
# (previously this was a TypeError because the series returned
# NotImplemented)
# this is an alignment issue; these are equivalent
# https://github.com/pydata/pandas/issues/5284
self.assertRaises(ValueError, lambda: d.__and__(s, axis='columns'))
self.assertRaises(ValueError, tester, s, d)
# this is wrong as its not a boolean result
# result = d.__and__(s,axis='index')
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = Series(self.ts.values[:-5] + int_ts.values,
index=self.ts.index[:-5], name='ts')
self.assert_series_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10), dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_arith_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
exp = pd.Series([3.0, 4.0, np.nan, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 + s2, exp)
tm.assert_series_equal(s2 + s1, exp)
exp = pd.DataFrame({'x': [3.0, 4.0, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() + s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() + s1.to_frame(), exp)
# different length
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
exp = pd.Series([3, 4, 5, np.nan],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 + s4, exp)
tm.assert_series_equal(s4 + s3, exp)
exp = pd.DataFrame({'x': [3, 4, 5, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() + s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() + s3.to_frame(), exp)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s2 = pd.Series([2, 2, 2], index=list('ABD'), name='x')
s3 = pd.Series([1, 2, 3], index=list('ABC'), name='x')
s4 = pd.Series([2, 2, 2, 2], index=list('ABCD'), name='x')
for l, r in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with tm.assertRaisesRegexp(ValueError, msg):
l == r
with tm.assertRaisesRegexp(ValueError, msg):
l != r
with tm.assertRaisesRegexp(ValueError, msg):
l < r
msg = "Can only compare identically-labeled DataFrame objects"
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() == r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() != r.to_frame()
with tm.assertRaisesRegexp(ValueError, msg):
l.to_frame() < r.to_frame()
def test_bool_ops_df_compat(self):
# GH 1134
s1 = pd.Series([True, False, True], index=list('ABC'), name='x')
s2 = pd.Series([True, True, False], index=list('ABD'), name='x')
exp = pd.Series([True, False, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 & s2, exp)
tm.assert_series_equal(s2 & s1, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s1 | s2, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, False, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s2 | s1, exp)
# DataFrame doesn't fill nan with False
exp = pd.DataFrame({'x': [True, False, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() & s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() & s1.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, np.nan, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s1.to_frame() | s2.to_frame(), exp)
tm.assert_frame_equal(s2.to_frame() | s1.to_frame(), exp)
# different length
s3 = pd.Series([True, False, True], index=list('ABC'), name='x')
s4 = pd.Series([True, True, True, True], index=list('ABCD'), name='x')
exp = pd.Series([True, False, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 & s4, exp)
tm.assert_series_equal(s4 & s3, exp)
# np.nan | True => np.nan, filled with False
exp = pd.Series([True, True, True, False],
index=list('ABCD'), name='x')
tm.assert_series_equal(s3 | s4, exp)
# True | np.nan => True
exp = pd.Series([True, True, True, True],
index=list('ABCD'), name='x')
tm.assert_series_equal(s4 | s3, exp)
exp = pd.DataFrame({'x': [True, False, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() & s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() & s3.to_frame(), exp)
exp = pd.DataFrame({'x': [True, True, True, np.nan]},
index=list('ABCD'))
tm.assert_frame_equal(s3.to_frame() | s4.to_frame(), exp)
tm.assert_frame_equal(s4.to_frame() | s3.to_frame(), exp)
def test_series_frame_radd_bug(self):
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
with tm.assertRaises(TypeError):
datetime.now() + self.ts
with tm.assertRaises(TypeError):
self.ts + datetime.now()
def test_series_radd_more(self):
data = [[1, 2, 3],
[1.1, 2.2, 3.3],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02'),
pd.NaT],
['x', 'y', 1]]
for d in data:
for dtype in [None, object]:
s = Series(d, dtype=dtype)
with tm.assertRaises(TypeError):
'foo_' + s
for dtype in [None, object]:
res = 1 + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([2, 3, 4], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + 1
tm.assert_series_equal(res, exp)
res = np.nan + pd.Series([1, 2, 3], dtype=dtype)
exp = pd.Series([np.nan, np.nan, np.nan], dtype=dtype)
tm.assert_series_equal(res, exp)
res = pd.Series([1, 2, 3], dtype=dtype) + np.nan
tm.assert_series_equal(res, exp)
s = pd.Series([pd.Timedelta('1 days'), pd.Timedelta('2 days'),
pd.Timedelta('3 days')], dtype=dtype)
exp = pd.Series([pd.Timedelta('4 days'), pd.Timedelta('5 days'),
pd.Timedelta('6 days')])
"""
**EMR Data Censoring Function**
Contains source code for :ref:`censorData` tool.
"""
import pandas as pd
import numpy as np
def censor_diagnosis(genotype_file, phenotype_file, final_pfile, final_gfile, efield, delta_field=None, start_time=np.nan, end_time=np.nan):
"""
Specify a range of ages for censoring event data, such that ``efield`` ages are
censored to the range
:math:`start \leq efield \leq end`
Instead of censoring based on absolute age, you may also censor with respect to
another data field using the ``delta_field``. If specified, the data is
censored based on the *interval between* ``delta_field`` and ``efield``:
:math:`start \leq deltafield - efield \leq end`.
Censored event data is saved to ``final_pfile``. Subjects with data remaining
after censoring are saved to ``final_gfile``.
:param genotype_file: path to input group file
:param phenotype_file: path to input phenotype file
:param final_pfile: path to output phenotype file
:param final_gfile: path to output group file
:param efield: name of field in the phenotype file to be censored
:param delta_field: name of field to censor with respect to (i.e. interval between ``delta_field`` and ``efield``) [default: None]
:param start_time: start time for censoring in years [default: None]
:param end_time: end time for censoring in years [default: None]
:type genotype_file: str
:type phenotype_file: str
:type final_pfile: str
:type final_gfile: str
:type efield: str
:type delta_field: str
:type start_time: float
:type end_time: float
:returns: None
.. note:: Either ``start_time`` and/or ``end_time`` must be given.
"""
# read files & check field names
print('Reading input files')
genotypes = pd.read_csv(genotype_file)
phenotypes = pd.read_csv(phenotype_file)
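# --- Illustrative sketch (added; not the original implementation, which is
# truncated above): one plausible way to apply the censoring rule described in
# the docstring, assuming `phenotypes` already contains the `efield` and
# (optional) `delta_field` columns expressed in years. ---
def _apply_censor_window(phenotypes, efield, delta_field=None,
                         start_time=np.nan, end_time=np.nan):
    # censor on absolute age, or on the interval between delta_field and efield
    values = (phenotypes[delta_field] - phenotypes[efield]
              if delta_field is not None else phenotypes[efield])
    mask = pd.Series(True, index=phenotypes.index)
    if not np.isnan(start_time):
        mask &= values >= start_time
    if not np.isnan(end_time):
        mask &= values <= end_time
    return phenotypes[mask]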
# Copyright (c) 2019-2021 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# https://github.com/boschresearch/pylife
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, copy
import warnings
import pytest
import numpy as np
import pandas as pd
import numpy.testing as testing
import pylife.strength.meanstress as MST
from pylife.strength.sn_curve import FiniteLifeCurve
def goodman_signal_sm():
Sm = np.array([-4., -2., -1., 0., 0.4, 2./3., 7./6.])
Sa = np.array([ 2., 2., 3./2., 1., 0.8, 2./3., 7./12.])
return pd.DataFrame({'sigma_m': Sm, 'sigma_a': Sa })
def goodman_signal_r():
Sm = np.array([-4., -2., -1., 0., 0.4, 2./3., 7./6.])
Sa = np.array([ 2., 2., 3./2., 1., 0.8, 2./3., 7./12.])
warnings.simplefilter('ignore', RuntimeWarning)
R = (Sm-Sa)/(Sm+Sa)
warnings.simplefilter('default', RuntimeWarning)
return pd.DataFrame({'sigma_a': Sa, 'R': R})
def five_segment_signal_sm():
Sm = np.array([-12./5., -2., -1., 0., 2./5., 2./3., 7./6., 1.+23./75., 2.+1./150., 3.+11./25., 3.+142./225.])
Sa = np.array([ 6./5., 2., 3./2., 1., 4./5., 2./3., 7./12., 14./25., 301./600., 86./225., 43./225.])
return pd.DataFrame({'sigma_m': Sm, 'sigma_a': Sa })
def five_segment_signal_r():
Sm = np.array([-12./5., -2., -1., 0., 2./5., 2./3., 7./6., 1.+23./75., 2.+1./150., 3.+11./25., 3.+142./225.])
Sa = np.array([ 6./5., 2., 3./2., 1., 4./5., 2./3., 7./12., 14./25., 301./600., 86./225., 43./225.])
warnings.simplefilter('ignore', RuntimeWarning)
R = (Sm-Sa)/(Sm+Sa)
warnings.simplefilter('default', RuntimeWarning)
return pd.DataFrame({'sigma_a': Sa, 'R': R })
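# Note (added for clarity): the stress ratio R used above is R = S_min / S_max
# = (Sm - Sa) / (Sm + Sa). A purely alternating load, e.g. Sm = 0 and Sa = 1,
# therefore gives R = (0 - 1) / (0 + 1) = -1, which is why most tests below
# correct the signals to R_goal = -1 and expect an amplitude of exactly 1.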
def test_FKM_goodman_plain_sm():
cyclic_signal = goodman_signal_sm()
Sa = cyclic_signal.sigma_a.to_numpy()
Sm = cyclic_signal.sigma_m.to_numpy()
M = 0.5
R_goal = 1.
testing.assert_raises(ValueError, MST.FKM_goodman, Sa, Sm, M, M/3, R_goal)
R_goal = -1.
res = MST.FKM_goodman(Sa, Sm, M, M/3, R_goal)
np.testing.assert_array_almost_equal(res, np.ones_like(res))
Sm = np.array([5])
Sa = np.array([0])
res = MST.FKM_goodman(Sa, Sm, M, M/3, R_goal)
assert np.equal(res,0.)
def test_FKM_goodman_single_M_sm():
cyclic_signal = goodman_signal_sm()
M = 0.5
R_goal = -1.
res = cyclic_signal.meanstress_mesh.FKM_goodman(pd.Series({ 'M':M, 'M2':M/3 }), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
def test_FKM_goodman_single_M_R():
cyclic_signal = goodman_signal_r()
M = 0.5
R_goal = -1.
res = cyclic_signal.meanstress_mesh.FKM_goodman(pd.Series({ 'M':M, 'M2':M/3 }), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
def test_FKM_goodman_multiple_M_sm():
cyclic_signal = goodman_signal_sm()
M = 0.5
R_goal = -1.
res = cyclic_signal.meanstress_mesh.FKM_goodman(pd.DataFrame({ 'M':[M]*7, 'M2':[M/3]*7, }), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
def test_FKM_goodman_multiple_M_R():
cyclic_signal = goodman_signal_r()
M = 0.5
R_goal = -1.
res = cyclic_signal.meanstress_mesh.FKM_goodman(pd.DataFrame({ 'M':[M]*7, 'M2':[M/3]*7, }), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
def test_five_segment_plain_sm():
cyclic_signal = five_segment_signal_sm()
Sa = cyclic_signal.sigma_a.to_numpy()
Sm = cyclic_signal.sigma_m.to_numpy()
M0= 0.5
M1 = M0/3.
M2 = M0/6.
M3 = 1.
M4 = -2.
R12 = 2./5.
R23 = 4./5.
R_goal = 1.
testing.assert_raises(ValueError, MST.five_segment_correction, Sa, Sm, M0, M1, M2, M3, M4, R12, R23, R_goal)
res = MST.five_segment_correction(Sa, Sm, M0=M0, M1=M1, M2=M2, M3=M3, M4=M4, R12=R12, R23=R23, R_goal=-1)
np.testing.assert_allclose(res, np.ones_like(res))
R_goal = -1.
res = MST.five_segment_correction(Sa, Sm, M0, M1, M2, M3, M4, R12, R23, R_goal)
np.testing.assert_array_almost_equal(res, np.ones_like(res))
Sm = np.array([5])
Sa = np.array([0])
res = MST.five_segment_correction(Sa, Sm, M0, M1, M2, M3, M4, R12, R23, R_goal)
assert np.equal(res,0.)
Sm = np.array([5, 5])
Sa = np.array([0, 0])
R_goal = 0.1
res = MST.five_segment_correction(Sa, Sm, M0, M1, M2, M3, M4, R12, R23, R_goal)
assert np.array_equal(res,np.array([0., 0.]))
def test_five_segment_single_M_sm():
cyclic_signal = five_segment_signal_sm()
M0= 0.5
M1 = M0/3.
M2 = M0/6.
M3 = 1.
M4 = -2.
R12 = 2./5.
R23 = 4./5.
R_goal = -1.
res = cyclic_signal.meanstress_mesh.five_segment(pd.Series({
'M0': M0, 'M1': M1, 'M2': M2, 'M3': M3, 'M4': M4,
'R12': R12, 'R23': R23
}), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
@pytest.mark.parametrize("Sm, Sa", [(np.array([row.sigma_m]), np.array([row.sigma_a])) for _, row in five_segment_signal_sm().iterrows()])
def test_five_segment_single_M_backwards(Sm, Sa):
cyclic_signal = pd.DataFrame({'sigma_a': [1.0], 'sigma_m': [0.0]})
M0= 0.5
M1 = M0/3.
M2 = M0/6.
M3 = 1.
M4 = -2.
R12 = 2./5.
R23 = 4./5.
warnings.simplefilter('ignore', RuntimeWarning)
R_goal = (Sm-Sa)/(Sm+Sa)
warnings.simplefilter('default', RuntimeWarning)
res = cyclic_signal.meanstress_mesh.five_segment(pd.Series({
'M0': M0, 'M1': M1, 'M2': M2, 'M3': M3, 'M4': M4,
'R12': R12, 'R23': R23
}), R_goal)
np.testing.assert_array_almost_equal(res.sigma_a, Sa)
def test_five_segment_single_M_R():
cyclic_signal = five_segment_signal_r()
M0= 0.5
M1 = M0/3.
M2 = M0/6.
M3 = 1.
M4 = -2.
R12 = 2./5.
R23 = 4./5.
R_goal = -1.
res = cyclic_signal.meanstress_mesh.five_segment(pd.Series({
'M0': M0, 'M1': M1, 'M2': M2, 'M3': M3, 'M4': M4,
'R12': R12, 'R23': R23
}), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
def test_five_segment_multiple_M_sm():
cyclic_signal = five_segment_signal_sm()
M0= 0.5
M1 = M0/3.
M2 = M0/6.
M3 = 1.
M4 = -2.
R12 = 2./5.
R23 = 4./5.
R_goal = -1.
res = cyclic_signal.meanstress_mesh.five_segment(pd.DataFrame({
'M0': [M0]*11, 'M1': [M1]*11, 'M2': [M2]*11, 'M3': [M3]*11, 'M4': [M4]*11,
'R12': [R12]*11, 'R23': [R23]*11
}), R_goal).sigma_a
np.testing.assert_array_almost_equal(res, np.ones_like(res))
@pytest.mark.parametrize("R_goal, expected", [ # all calculated by pencil on paper
(-1., 2.0),
(0., 4./3.),
(-1./3., 8./5.),
(1./3., 14./12.)
])
def test_FKM_goodman_hist_range_mean(R_goal, expected):
rg = pd.IntervalIndex.from_breaks(np.linspace(0, 2, 25), closed='left')
mn = pd.IntervalIndex.from_breaks(np.linspace(0, 2, 25), closed='left')
df = pd.Series(np.zeros(24*24), name='cycles',
index=pd.MultiIndex.from_product([rg, mn], names=['range', 'mean']))
df.loc[(7./6. - 1./24., 7./6.)] = 1.
df.loc[(4./3. - 1./24., 2./3.)] = 3.
df.loc[(2. - 1./24., 0.)] = 5.
haigh = pd.Series({'M': 0.5, 'M2': 0.5/3.})
res = df.meanstress_hist.FKM_goodman(haigh, R_goal)
test_interval = pd.Interval(expected-1./96., expected+1./96.)
assert res.loc[res.index.overlaps(test_interval)].sum() == 9
assert res.loc[np.logical_not(res.index.overlaps(test_interval))].sum() == 0
@pytest.mark.parametrize("R_goal, expected", [ # all calculated by pencil on paper
(-1., 2.0),
(0., 4./3.),
(-1./3., 8./5.),
(1./3., 14./12.)
])
def test_FKM_goodman_hist_from_to(R_goal, expected):
fr = pd.IntervalIndex.from_breaks(np.linspace(-1., 1., 49), closed='left')
to = pd.IntervalIndex.from_breaks(np.linspace(0, 2., 49), closed='left')
df = pd.Series(np.zeros(48*48), name='cycles',
index=pd.MultiIndex.from_product([fr, to], names=['from', 'to']))
df.loc[(14./24., 21./12.)] = 1
df.loc[(0., 4./3.)] = 3
df.loc[(-1., 1.)] = 5
haigh = pd.Series({'M': 0.5, 'M2': 0.5/3.})
res = df.meanstress_hist.FKM_goodman(haigh, R_goal)
test_interval = pd.Interval(expected-1./96., expected+1./96.)
assert res.loc[res.index.overlaps(test_interval)].sum() == 9
assert res.loc[np.logical_not(res.index.overlaps(test_interval))].sum() == 0
from scipy.sparse import issparse, isspmatrix
import numpy as np
import pandas as pd
from multiprocessing.dummy import Pool as ThreadPool
import itertools
from tqdm import tqdm
from anndata import AnnData
from typing import Union
from .utils import normalize_data, TF_link_gene_chip
from ..tools.utils import flatten, einsum_correlation
def scribe(
adata: AnnData,
genes: Union[list, None] = None,
TFs: Union[list, None] = None,
Targets: Union[list, None] = None,
gene_filter_rate: float = 0.1,
cell_filter_UMI: int = 10000,
motif_ref: str = "https://www.dropbox.com/s/s8em539ojl55kgf/motifAnnotations_hgnc.csv?dl=1",
nt_layers: list = ["X_new", "X_total"],
normalize: bool = False,
do_CLR: bool = True,
drop_zero_cells: bool = True,
TF_link_ENCODE_ref: str = "https://www.dropbox.com/s/bjuope41pte7mf4/df_gene_TF_link_ENCODE.csv?dl=1",
) -> AnnData:
"""Apply Scribe to calculate causal network from spliced/unspliced, metabolic labeling based and other "real" time
series datasets. Note that this function can be applied both to metabolic labeling based single-cell assays
with newly synthesized and total RNA and to regular single-cell assays with both unspliced and spliced
transcripts. Furthermore, you can also replace either the new or the unspliced RNA with dynamo-estimated cell-wise
velocity, transcription, splicing and degradation rates for each gene (similarly, replacing the expression values
of transcription factors with RNA binding, ribosome, epigenetics or epitranscriptomic factors, etc.) to infer the
total regulatory effects, transcription, splicing and post-transcriptional regulation of different factors.
Parameters
----------
adata: :class:`~anndata.AnnData`.
adata object that includes both newly synthesized and total gene expression of cells. Alternatively,
the object should include both unspliced and spliced gene expression of cells.
genes:
The list of gene names that will be used for casual network inference. By default, it is `None` and thus
will use all genes.
TFs:
The list of transcription factors that will be used for casual network inference. When it is `None` gene
list included in the file linked by `motif_ref` will be used.
Targets:
The list of target genes that will be used for casual network inference. When it is `None` gene list not
included in the file linked by `motif_ref` will be used.
gene_filter_rate:
minimum percentage of expressed cells for gene filtering.
cell_filter_UMI:
minimum number of UMIs for cell filtering.
motif_ref:
It provides the list of TFs gene names and is used to parse the data to get the list of TFs and Targets
for the causal network inference from those TFs to Targets. But currently the motif based filtering is not
implemented. By default it is a Dropbox link that stores the data from us. Other motif references can be
downloaded from RcisTarget: https://resources.aertslab.org/cistarget/. For the human motif matrix, it can be
downloaded from June's shared folder:
https://shendure-web.gs.washington.edu/content/members/cao1025/public/nobackup/sci_fate/data/hg19-tss-
centered-10kb-7species.mc9nr.feather
nt_layers:
The two keys for layers that will be used for the network inference. Note that the layers can be changed
flexibly. See the description of this function above. The first key corresponds to the transcriptome of the
next time point, for example unspliced RNAs (or estimated velocity, see Fig 6 of the Scribe preprint:
https://www.biorxiv.org/content/10.1101/426981v1) from RNA velocity, new RNA from scSLAM-seq data, etc.
The second key corresponds to the transcriptome of the initial time point, for example spliced RNAs from RNA
velocity, old RNA from scSLAM-seq data.
drop_zero_cells:
Whether to drop cells with zero expression for either the potential regulator or the potential target. This
can sharpen the relationship between potential regulators and targets and speed up the calculation, but at
the risk of ignoring strong inhibition effects from certain regulators on their targets.
do_CLR:
Whether to perform context likelihood of relatedness (CLR) analysis on the reconstructed causal network.
TF_link_ENCODE_ref:
The path to the TF chip-seq data. By default it is a dropbox link from us that stores the data. Other data
can be downloaded from: https://amp.pharm.mssm.edu/Harmonizome/dataset/ENCODE+Transcription+Factor+Targets.
Returns
-------
An updated adata object with a new key `causal_net` in .uns attribute, which stores the inferred causal network.
"""
try:
from Scribe.Scribe import causal_net_dynamics_coupling, CLR
except ImportError:
raise ImportError(
"You need to install the package `Scribe`."
"Plelease install from https://github.com/aristoteleo/Scribe-py."
"Also check our paper: "
"https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
)
# detect format of the gene name:
str_format = (
"upper"
if adata.var_names[0].isupper()
else "lower"
if adata.var_names[0].islower()
else "title"
if adata.var_names[0].istitle()
else "other"
)
motifAnnotations_hgnc = pd.read_csv(motif_ref, sep="\t")
TF_list = motifAnnotations_hgnc.loc[:, "TF"].values
if str_format == "title":
TF_list = [i.capitalize() for i in TF_list]
elif str_format == "lower":
TF_list = [i.lower() for i in TF_list]
n_obs, n_var = adata.n_obs, adata.n_vars
# generate the expression matrix for downstream analysis
if nt_layers[1] == "old" and "old" not in adata.layers.keys():
adata.layers["old"] = (
adata.layers["total"] - adata.layers["new"]
if "velocity" not in adata.layers.keys()
else adata.layers["total"] - adata.layers["velocity"]
)
# filter genes
print(f"Original gene number: {n_var}")
gene_filter_new = (adata.layers[nt_layers[0]] > 0).sum(0) > (gene_filter_rate * n_obs)
gene_filter_tot = (adata.layers[nt_layers[1]] > 0).sum(0) > (gene_filter_rate * n_obs)
if issparse(adata.layers[nt_layers[0]]):
gene_filter_new = gene_filter_new.A1
if issparse(adata.layers[nt_layers[1]]):
gene_filter_tot = gene_filter_tot.A1
adata = adata[:, gene_filter_new * gene_filter_tot]
print(f"Gene number after filtering: {sum(gene_filter_new * gene_filter_tot)}")
# filter cells
print(f"Original cell number: {n_obs}")
cell_filter = adata.layers[nt_layers[1]].sum(1) > cell_filter_UMI
if issparse(adata.layers[nt_layers[1]]):
cell_filter = cell_filter.A1
adata = adata[cell_filter, :]
if adata.n_obs == 0:
raise Exception("No cells remaining after filtering, try relaxing `cell_filtering_UMI`.")
print(f"Cell number after filtering: {adata.n_obs}")
new = adata.layers[nt_layers[0]]
total = adata.layers[nt_layers[1]]
if normalize:
# recalculate size factor
from ..preprocessing import szFactor
adata = szFactor(
adata,
method="mean-geometric-mean-total",
round_exprs=True,
total_layers=["total"],
)
szfactors = adata.obs["Size_Factor"][:, None]
# normalize data (size factor correction, log transform and the scaling)
adata.layers[nt_layers[0]] = normalize_data(new, szfactors, pseudo_expr=0.1)
adata.layers[nt_layers[1]] = normalize_data(total, szfactors, pseudo_expr=0.1)
TFs = adata.var_names[adata.var.index.isin(TF_list)].to_list() if TFs is None else np.unique(TFs)
Targets = adata.var_names.difference(TFs).to_list() if Targets is None else np.unique(Targets)
if genes is not None:
TFs = list(set(genes).intersection(TFs))
Targets = list(set(genes).intersection(Targets))
if len(TFs) == 0 or len(Targets) == 0:
raise Exception(
"The TFs or Targets are empty! Something (input TFs/Targets list, gene_filter_rate, etc.) is wrong."
)
print(f"Potential TFs are: {len(TFs)}")
print(f"Potential Targets are: {len(Targets)}")
causal_net_dynamics_coupling(
adata,
TFs,
Targets,
t0_key=nt_layers[1],
t1_key=nt_layers[0],
normalize=False,
drop_zero_cells=drop_zero_cells,
)
res_dict = {"RDI": adata.uns["causal_net"]["RDI"]}
if do_CLR:
res_dict.update({"CLR": CLR(res_dict["RDI"])})
if TF_link_ENCODE_ref is not None:
df_gene_TF_link_ENCODE = pd.read_csv(TF_link_ENCODE_ref, sep="\t")
df_gene_TF_link_ENCODE["id_gene"] = (
df_gene_TF_link_ENCODE["id"].astype("str") + "_" + df_gene_TF_link_ENCODE["linked_gene_name"].astype("str")
)
df_gene = pd.DataFrame(adata.var.index, index=adata.var.index)
df_gene.columns = ["linked_gene"]
net = res_dict[list(res_dict.keys())[-1]]
net = net.reset_index().rename(columns={"index": "id"}).melt(
id_vars="id",
var_name="linked_gene",
value_name="corcoef",
)
net_var = net.merge(df_gene)
net_var["id_gene"] = net_var["id"].astype("str") + "_" + net_var["linked_gene_name"].astype("str")
filtered = TF_link_gene_chip(net_var, df_gene_TF_link_ENCODE, adata.var, cor_thresh=0.02)
res_dict.update({"filtered": filtered})
adata.uns["causal_net"] = res_dict
return adata
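# Illustrative usage sketch (added for clarity; not part of the original module).
# The file name and layer keys below are assumptions for demonstration only.
def _example_scribe_run(path_to_h5ad="labeling_data.h5ad"):
    """Minimal example of running `scribe` on a metabolic labeling dataset."""
    import anndata

    adata = anndata.read_h5ad(path_to_h5ad)  # hypothetical labeling dataset
    adata = scribe(adata, nt_layers=["X_new", "X_total"], do_CLR=True)
    # the inferred networks (RDI, CLR, filtered) are stored in .uns["causal_net"]
    return adata.uns["causal_net"]["CLR"]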
def coexp_measure(adata, genes, layer_x, layer_y, cores=1, skip_mi=True):
"""Calculate co-expression measures, including mutual information (MI), pearson correlation, etc. of genes between
two different layers.
Parameters
----------
adata: :class:`~anndata.AnnData`.
adata object that will be used for mutual information calculation.
genes: `List` (default: None)
Gene names from the adata object that will be used for mutual information calculation.
layer_x: `str`
The first key of the layer from the adata object that will be used for mutual information calculation.
layer_y: `str`
The second key of the layer from the adata object that will be used for mutual information calculation.
cores: `int` (default: 1)
Number of cores to run the MI calculation. If cores is set to be > 1, multiprocessing will be used to
parallel the calculation. `cores` is only applicable to MI calculation.
skip_mi: `bool` (default: `True`)
Whether to skip the mutual information calculation step which is time-consuming.
Returns
-------
An updated adata object with new columns (`mi`, `pearson`) in .var that contain the mutual information and
Pearson correlation of the input genes.
"""
try:
from Scribe.information_estimators import mi
except ImportError:
raise ImportError(
"You need to install the package `Scribe`."
"Plelease install from https://github.com/aristoteleo/Scribe-py."
"Also check our paper: "
"https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
)
adata.var["mi"], adata.var["pearson"] = np.nan, np.nan
if not skip_mi:
mi_vec = np.zeros(len(genes))
pearson = np.zeros(len(genes))
X, Y = adata[:, genes].layers[layer_x].T, adata[:, genes].layers[layer_y].T
X, Y = X.A if issparse(X) else X, Y.A if issparse(Y) else Y
k = min(5, int(adata.n_obs / 5 + 1))
for i in tqdm(
range(len(genes)),
desc=f"calculating mutual information between {layer_x} and {layer_y} data",
):
x, y = X[i], Y[i]
mask = np.logical_and(np.isfinite(x), np.isfinite(y))
pearson[i] = einsum_correlation(x[None, mask], y[mask], type="pearson")
x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]
if not skip_mi:
if cores == 1:
mi_vec[i] = mi(x, y, k=k)
if cores != 1:
if not skip_mi:
def pool_mi(x, y, k):
mask = np.logical_and(np.isfinite(x), np.isfinite(y))
x, y = [[i] for i in x[mask]], [[i] for i in y[mask]]
return mi(x, y, k)
pool = ThreadPool(cores)
res = pool.starmap(pool_mi, zip(X, Y, itertools.repeat(k)))
pool.close()
pool.join()
mi_vec = np.array(res)
if not skip_mi:
adata.var.loc[genes, "mi"] = mi_vec
adata.var.loc[genes, "pearson"] = pearson
def coexp_measure_mat(
adata,
TFs=None,
Targets=None,
guide_keys=None,
t0_key="spliced",
t1_key="velocity",
normalize=True,
drop_zero_cells=True,
skip_mi=True,
cores=1,
copy=False,
):
"""Infer causal networks with dynamics-coupled single cells measurements.
Network inference is a insanely challenging problem which has a long history and that none of the existing
algorithms work well. However, it's quite possible that one or more of the algorithms could work if only they were
given enough data. Single-cell RNA-seq is exciting because it provides a ton of data. Somewhat surprisingly, just
having a lot of single-cell RNA-seq data won't make causal inference work well. We need a fundamentally better type
of measurement that couples information across cells and across time points. Experimental improvements are coming
now, and whether they are sufficient to power methods like Scribe is important future work. For example, the recent
developed computational algorithm (La Manno et al. 2018) estimates the levels of new (unspliced) versus mature
(spliced) transcripts from single-cell RNA-seq data for free. Moreover, exciting experimental approaches, like
single cell SLAM-seq methods (Hendriks et al. 2018; Erhard et al. 2019; Cao, Zhou, et al. 2019) are recently
developed that measures the transcriptome of two time points of the same cells. Datasets generated from those
methods will provide improvements of causal network inference as we comprehensively demonstrated from the manuscript
. This function take advantages of those datasets to infer the causal networks. We note that those technological
advance may be still not sufficient, radically different methods, for example something like highly multiplexed live
imaging that can record many genes may be needed.
Arguments
---------
adata: `anndata`
Annotated data matrix.
TFs: `List` or `None` (default: None)
The list of transcription factors that will be used for casual network inference.
Targets: `List` or `None` (default: None)
The list of target genes that will be used for casual network inference.
guide_keys: `List` (default: None)
The key of the CRISPR-guides, stored as a column in the .obs attribute. This argument is useful
for identifying the knockout or knockin genes for a perturb-seq experiment. Currently not used.
t0_key: `str` (default: spliced)
Key corresponds to the transcriptome of the initial time point, for example spliced RNAs from RNA velocity, old
RNA from scSLAM-seq data.
t1_key: `str` (default: velocity)
Key corresponds to the transcriptome of the next time point, for example unspliced RNAs (or estimated velocity,
see Fig 6 of the Scribe preprint) from RNA velocity, or new RNA from scSLAM-seq data.
normalize: `bool`
Whether to scale the expression or velocity values into 0 to 1 before calculating causal networks.
drop_zero_cells: `bool` (Default: True)
Whether to drop cells with zero expression for either the potential regulator or the potential target. This
can sharpen the relationship between potential regulators and targets and speed up the calculation, but at the
risk of ignoring strong inhibition effects from certain regulators on their targets.
copy: `bool`
Whether to return a copy of the adata or just update adata in place.
Returns
---------
An update AnnData object with inferred causal network stored as a matrix related to the key `causal_net` in the
`uns` slot.
"""
try:
from Scribe.information_estimators import mi
except ImportError:
raise ImportError(
"You need to install the package `Scribe`."
"Please install from https://github.com/aristoteleo/Scribe-py."
"Also check our paper: "
"https://www.sciencedirect.com/science/article/abs/pii/S2405471220300363"
)
if TFs is None:
TFs = adata.var_names.tolist()
else:
TFs = adata.var_names.intersection(TFs).tolist()
if len(TFs) == 0:
raise Exception(
"The adata object has no gene names from .var_name that intersects with the TFs list you provided"
)
if Targets is None:
Targets = adata.var_names.tolist()
else:
Targets = adata.var_names.intersection(Targets).tolist()
if len(Targets) == 0:
raise Exception(
"The adata object has no gene names from .var_name that intersect with the Targets list you provided"
)
if guide_keys is not None:
guides = np.unique(adata.obs[guide_keys].tolist())
guides = np.setdiff1d(guides, ["*", "nan", "neg"])
idx_var = [vn in guides for vn in adata.var_names]
idx_var = np.argwhere(idx_var)
guides = adata.var_names.values[idx_var.flatten()].tolist()
# support sparse matrix:
genes = TFs + Targets
genes = np.unique(genes)
t0_df = (
pd.DataFrame(adata[:, genes].layers[t0_key].todense(), index=adata.obs_names, columns=genes)
if isspmatrix(adata.layers[t0_key])
else pd.DataFrame(adata[:, genes].layers[t0_key], index=adata.obs_names, columns=genes)
)
t1_df = (
pd.DataFrame(adata[:, genes].layers[t1_key].todense(), index=adata.obs_names, columns=genes)
if isspmatrix(adata.layers[t1_key])
else pd.DataFrame(adata[:, genes].layers[t1_key], index=adata.obs_names, columns=genes)
)
t1_df[pd.isna(t1_df)] = 0  # set remaining NaN values to 0 before network inference
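# Illustrative usage sketch (hypothetical: the enclosing function's name and the exact
# gene names are assumptions, since the definition line is not shown in this excerpt).
# Given an AnnData object with "spliced" and "velocity" layers, one might call:
#   adata = <function_above>(adata, TFs=["Gata1"], Targets=["Klf1"],
#                            t0_key="spliced", t1_key="velocity", copy=True)
#   net = adata.uns["causal_net"]  # inferred regulator-by-target score matrix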
import logging
import numpy as np
import pandas as pd
from collections import Counter as counter
from tardis.plasma.properties.base import (
ProcessingPlasmaProperty,
HiddenPlasmaProperty,
BaseAtomicDataProperty,
)
from tardis.plasma.exceptions import IncompleteAtomicData
logger = logging.getLogger(__name__)
__all__ = [
"Levels",
"Lines",
"LinesLowerLevelIndex",
"LinesUpperLevelIndex",
"AtomicMass",
"IonizationData",
"ZetaData",
"NLTEData",
"PhotoIonizationData",
]
class Levels(BaseAtomicDataProperty):
"""
Attributes
----------
levels : pandas.MultiIndex
(atomic_number, ion_number, level_number)
Index of filtered atomic data. Index used for all other attribute dataframes for this class
excitation_energy : pandas.DataFrame, dtype float
Excitation energies of atomic levels.
Index is levels.
metastability : pandas.DataFrame, dtype bool
Records whether atomic levels are metastable.
Index is levels.
g : pandas.DataFrame (index=levels), dtype float
Statistical weights of atomic levels.
"""
outputs = ("levels", "excitation_energy", "metastability", "g")
latex_name = (
r"\textrm{levels}",
r"\epsilon_{\textrm{k}}",
r"\textrm{metastability}",
"g",
)
def _filter_atomic_property(self, levels, selected_atoms):
return levels
# return levels[levels.atomic_number.isin(selected_atoms)]
def _set_index(self, levels):
# levels = levels.set_index(['atomic_number', 'ion_number',
# 'level_number'])
return (
levels.index,
levels["energy"],
levels["metastable"],
levels["g"],
)
class Lines(BaseAtomicDataProperty):
"""
Attributes
----------
lines : pandas.DataFrame
Atomic lines data. Columns are wavelength, atomic_number,ion_number,
f_ul, f_lu, level_number_lower, level_number_upper, nu, B_lu, B_ul, A_ul,
wavelength. Index is line_id.
nu : pandas.DataFrame, dtype float
Line frequency data. Index is line_id.
f_lu : pandas.DataFrame, dtype float
Transition probability data. Index is line_id.
wavelength_cm : pandas.DataFrame, dtype float
Line wavelengths in cm. Index is line_id.
"""
# Would like for lines to just be the line_id values
outputs = ("lines", "nu", "f_lu", "wavelength_cm")
def _filter_atomic_property(self, lines, selected_atoms):
# return lines[lines.atomic_number.isin(selected_atoms)]
return lines
def _set_index(self, lines):
# lines.set_index('line_id', inplace=True)
return lines, lines["nu"], lines["f_lu"], lines["wavelength_cm"]
class PhotoIonizationData(ProcessingPlasmaProperty):
"""
Attributes
----------
photo_ion_cross_sections : pandas.DataFrame, dtype float
Photoionization cross sections as a function of frequency.
Columns are nu, x_sect, index=('atomic_number','ion_number','level_number')
photo_ion_block_references : numpy.ndarray, dtype int
Indices where the photoionization data for
a given level starts. Needed for calculation
of recombination rates.
photo_ion_index : pandas.MultiIndex, dtype int
Atomic, ion and level numbers for which
photoionization data exists.
"""
outputs = (
"photo_ion_cross_sections",
"photo_ion_block_references",
"photo_ion_index",
)
latex_name = (r"\xi_{\textrm{i}}(\nu)", "", "")
def calculate(self, atomic_data, continuum_interaction_species):
photoionization_data = atomic_data.photoionization_data.set_index(
["atomic_number", "ion_number", "level_number"]
)
selected_species_idx = pd.IndexSlice[
continuum_interaction_species.get_level_values("atomic_number"),
continuum_interaction_species.get_level_values("ion_number"),
slice(None),
]
photoionization_data = photoionization_data.loc[selected_species_idx]
phot_nus = photoionization_data["nu"]
block_references = np.hstack(
[[0], phot_nus.groupby(level=[0, 1, 2]).count().values.cumsum()]
)
photo_ion_index = photoionization_data.index.unique()
return photoionization_data, block_references, photo_ion_index
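# Worked example of the block references (illustrative numbers): if the selected levels
# have 3, 2 and 4 tabulated cross-section points, the per-level counts are [3, 2, 4] and
# photo_ion_block_references becomes [0, 3, 5, 9]; the data for level i then occupies
# rows block_references[i]:block_references[i + 1] of the cross-section table.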
class LinesLowerLevelIndex(HiddenPlasmaProperty):
"""
Attributes
----------
lines_lower_level_index : numpy.ndrarray, dtype int
Levels data for lower levels of particular lines
"""
outputs = ("lines_lower_level_index",)
def calculate(self, levels, lines):
levels_index = pd.Series(
np.arange(len(levels), dtype=np.int64), index=levels
)
lines_index = lines.index.droplevel("level_number_upper")
return np.array(levels_index.loc[lines_index])
class LinesUpperLevelIndex(HiddenPlasmaProperty):
"""
Attributes
----------
lines_upper_level_index : numpy.ndarray, dtype int
Levels data for upper levels of particular lines
"""
outputs = ("lines_upper_level_index",)
def calculate(self, levels, lines):
levels_index = pd.Series(
np.arange(len(levels), dtype=np.int64), index=levels
)
lines_index = lines.index.droplevel("level_number_lower")
return np.array(levels_index.loc[lines_index])
class AtomicMass(ProcessingPlasmaProperty):
"""
Attributes
----------
atomic_mass : pandas.Series
Atomic masses of the elements used. Indexed by atomic number.
"""
outputs = ("atomic_mass",)
def calculate(self, atomic_data, selected_atoms):
if getattr(self, self.outputs[0]) is not None:
return (getattr(self, self.outputs[0]),)
else:
return atomic_data.atom_data.loc[selected_atoms].mass
class IonizationData(BaseAtomicDataProperty):
"""
Attributes
----------
ionization_data : pandas.Series
Holding ionization energies
Indexed by atomic number, ion number.
"""
outputs = ("ionization_data",)
def _filter_atomic_property(self, ionization_data, selected_atoms):
mask = ionization_data.index.isin(selected_atoms, level="atomic_number")
ionization_data = ionization_data[mask]
counts = ionization_data.groupby(level="atomic_number").count()
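# Completeness check below: an element with atomic number Z should contribute exactly
# Z ionization energies (one per ionization stage), so each per-element count must
# equal its atomic number.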
if np.alltrue(counts.index == counts):
return ionization_data
else:
raise IncompleteAtomicData(
"ionization data for the ion ({}, {})".format(
str(counts.index[counts.index != counts]),
str(counts[counts.index != counts]),
)
)
def _set_index(self, ionization_data):
return ionization_data
class ZetaData(BaseAtomicDataProperty):
"""
Attributes
----------
zeta_data : pandas.DataFrame, dtype float
Zeta data for the elements used. Indexed by atomic number, ion number.
Columns are temperature values up to 40,000 K in iterations of 2,000 K.
The zeta value represents the fraction of recombination events
from the ionized state that go directly to the ground state.
"""
outputs = ("zeta_data",)
def _filter_atomic_property(self, zeta_data, selected_atoms):
zeta_data["atomic_number"] = zeta_data.index.codes[0] + 1
zeta_data["ion_number"] = zeta_data.index.codes[1] + 1
zeta_data = zeta_data[zeta_data.atomic_number.isin(selected_atoms)]
zeta_data_check = counter(zeta_data.atomic_number.values)
keys = np.array(list(zeta_data_check.keys()))
values = np.array(list(zeta_data_check.values()))  # list() so numpy builds a 1-D array from the dict view
if np.alltrue(keys + 1 == values):
return zeta_data
else:
# raise IncompleteAtomicData('zeta data')
# This currently replaces missing zeta data with 1, which is necessary with
# the present atomic data. Will replace with the error above when I have
# complete atomic data.
missing_ions = []
updated_index = []
for atom in selected_atoms:
for ion in range(1, atom + 2):
if (atom, ion) not in zeta_data.index:
missing_ions.append((atom, ion))
updated_index.append([atom, ion])
logger.warning(
"Zeta_data missing - replaced with 1s. Missing ions: {}".format(
missing_ions
)
)
updated_index = np.array(updated_index)
updated_dataframe = pd.DataFrame(
index=pd.MultiIndex.from_arrays(
updated_index.transpose().astype(int)
),
columns=zeta_data.columns,
)
for value in range(len(zeta_data)):
updated_dataframe.loc[
zeta_data.atomic_number.values[value],
zeta_data.ion_number.values[value],
] = zeta_data.loc[
zeta_data.atomic_number.values[value],
zeta_data.ion_number.values[value],
]
updated_dataframe = updated_dataframe.astype(float)
updated_index = pd.DataFrame(updated_index)
import calendar as cal
import pandas as pd
from hidrocomp.series.exceptions import StationError
from hidrocomp.statistic.pearson3 import Pearson3
from hidrocomp.series.series_build import SeriesBuild
from hidrocomp.series.partial import Partial
from hidrocomp.series.maximum import MaximumFlow
from hidrocomp.series.minimum import MinimumFlow
from hidrocomp.series.monthly_average import MonthlyAverageFlow
from hidrocomp.eflow import IHA
from hidrocomp.graphics import RatingCurve, HydrogramYear, HydrogramClean
class Flow(SeriesBuild):
type_data = 'FLUVIOMÉTRICO'
data_type = 'flow'
def __init__(self, data=None, path_file=None, station=None, source=None, *args, **kwargs):
super().__init__(data=data, path=path_file, station=station, source=source, data_type=self.type_data, *args,
**kwargs)
self.__month_num_flood = None
self.__month_abr_flood = None
self.__month_num_drought = None
self.__month_abr_drought = None
def _month_start_year_hydrologic(self):
if self.__month_num_flood is None:
if self.station is None:
raise TypeError("Define a station!")
else:
data = pd.DataFrame(self.data[self.station])
import pandas as pd
import numpy as np
import sklearn as sk
import matplotlib.pyplot as plt
from sklearn import metrics
from json import *
import requests
pd.set_option('display.max_rows', 21000)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 150)
import numpy as np
import pandas as pd
from matplotlib import *
# .........................Series.......................#
x1 = np.array([1, 2, 3, 4])
s = pd.Series(x1, index=[1, 2, 3, 4])
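# s is now a Series with index [1, 2, 3, 4] and values [1, 2, 3, 4]; its dtype is the
# platform default integer (typically int64).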
"""
This class contains all parameters for all models for different countries.
It contains methods to obtain observed data.
It also contains the common methods to use the model itself.
"""
import numpy as np
import pandas as pd
import math
from Communication import Database
np.set_printoptions(suppress=True)
# Inspiration for the model:
# https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3182455/
# https://www.researchgate.net/publication/334695153_Complete_maximum_likelihood_estimation_for_SEIR_epidemic_models_theoretical_development
# I got up to the first sample path diagrams before I got completely lost
class Parameters:
"""
A single class to store all model parameters.
See model_1 comments for information on these parameters
"""
def __init__(self, m, n, Ep, Ip, alpha, beta, offset, country, is_country, N):
self.m = m
self.n = n
self.Ep = Ep
self.Ip = Ip
self.alpha = alpha
self.beta = beta
self.offset = offset
self.country = country
self.is_country = is_country
self.N = N
def unpack(self):
"""
:return: all the model parameters as a tuple
"""
return self.m, self.n, self.Ep, self.Ip, self.alpha, self.beta, self.offset, \
self.country, self.is_country, self.N
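# Example construction (illustrative values only, not fitted parameters):
#   p = Parameters(m=2, n=3, Ep=0.1, Ip=0.2, alpha=0.3, beta=0.4, offset=0,
#                  country="South Africa", is_country=True, N=59_000_000)
#   m, n, Ep, Ip, alpha, beta, offset, country, is_country, N = p.unpack()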
def choose(n, r):
return (math.factorial(n)) / math.factorial(r) / math.factorial(n - r)
def binomial(x, n, p):
return choose(n, x) * p ** x * (1 - p) ** (n - x)
def binomial_dist(n, p):
dist = []
for x in range(n + 1):
dist.append(binomial(x, n, p))
return np.array(dist)
def neg_bin(x, k, p):
return choose(x + k - 1, k - 1) * p ** k * (1 - p) ** x
def neg_bin_dist(k, p, length):
dist = []
for x in range(length):
dist.append(neg_bin(x, k, p))
return np.array(dist)
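# Sanity check (sketch): binomial_dist(n, p) enumerates P(X = 0..n) for
# X ~ Binomial(n, p), so binomial_dist(4, 0.3).sum() equals 1.0 up to rounding,
# while neg_bin_dist(k, p, length) is truncated after `length` terms and its sum
# only approaches 1 as `length` grows.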
def get_observed_I_and_R(country: str, is_country: bool = True):
"""
Gets the data for the number of confirmed cases and the number of recovered.
:param country: The country or province for which to get the cases
:param is_country: Whether the country variable is for a country or for a province
:return: A tuple of 2 arrays: one for cumulative infected and one for cumulative recovered
"""
country = country.replace("'", "")
database = Database("Data\\CombinedData.accdb")
if is_country:
# Source: https://data.humdata.org/dataset/novel-coronavirus-2019-ncov-cases
# And https://www.worldometers.info/coronavirus/
data_dict = database.select("SELECT IRTbl.Country, IRTbl.Day, Sum(IRTbl.Infected) AS SumOfInfected, "
"Sum([IRTbl].[Recovered]+[IRTbl].[Dead]) AS SumOfRecoveredAndDead\n"
"FROM IRTbl\n"
"GROUP BY IRTbl.Country, IRTbl.Day\n"
"HAVING (((IRTbl.Country)='{}') AND ((Sum(IRTbl.Infected))>0))\n"
"ORDER BY IRTbl.Day;".format(country),
["SumOfInfected", "SumOfRecoveredAndDead"])
else:
print("Warning: Worldometer (the data source since 2020/03/25) does not specify provinces in their data.\n"
" It is best to rather specify a country than a province due to this.")
data_dict = database.select("SELECT IRTbl.Province, IRTbl.Day, Sum(IRTbl.Infected) AS SumOfInfected, "
"Sum([IRTbl].[Recovered]+[IRTbl].[Dead]) AS SumOfRecoveredAndDead\n"
"FROM IRTbl\n"
"GROUP BY IRTbl.Province, IRTbl.Day\n"
"HAVING (((IRTbl.Province)='{}') AND ((Sum([IRTbl].[Infected]))>0))\n"
"ORDER BY IRTbl.Day;".format(country),
["SumOfInfected", "SumOfRecoveredAndDead"])
data = pd.DataFrame(data=data_dict)
from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Sequence,
cast,
)
import numpy as np
from pandas._libs import (
NaT,
internals as libinternals,
)
from pandas._libs.missing import NA
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_1d_only_ea_dtype,
is_1d_only_ea_obj,
is_datetime64tz_dtype,
is_dtype_equal,
needs_i8_conversion,
)
from pandas.core.dtypes.concat import (
cast_to_common_type,
concat_compat,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.internals.array_manager import (
ArrayManager,
NullArrayProxy,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(
axes[ax], indexer, axis=ax, allow_dups=True, use_na_proxy=True
)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
if copy:
arrays = [x.copy() for x in arrays]
new_mgr = ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
return new_mgr
def concat_arrays(to_concat: list) -> ArrayLike:
"""
Alternative for concat_compat but specialized for use in the ArrayManager.
Differences: only deals with 1D arrays (no axis keyword), assumes
ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
the dtype.
In addition ensures that all NullArrayProxies get replaced with actual
arrays.
Parameters
----------
to_concat : list of arrays
Returns
-------
np.ndarray or ExtensionArray
"""
# ignore the all-NA proxies to determine the resulting dtype
to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
dtypes = {x.dtype for x in to_concat_no_proxy}
single_dtype = len(dtypes) == 1
if single_dtype:
target_dtype = to_concat_no_proxy[0].dtype
elif all(x.kind in ["i", "u", "b"] and isinstance(x, np.dtype) for x in dtypes):
# GH#42092
target_dtype = np.find_common_type(list(dtypes), [])
else:
target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
if target_dtype.kind in ["m", "M"]:
# for datetimelike use DatetimeArray/TimedeltaArray concatenation
# don't use arr.astype(target_dtype, copy=False), because that doesn't
# work for DatetimeArray/TimedeltaArray (returns ndarray)
to_concat = [
arr.to_array(target_dtype) if isinstance(arr, NullArrayProxy) else arr
for arr in to_concat
]
return type(to_concat_no_proxy[0])._concat_same_type(to_concat, axis=0)
to_concat = [
arr.to_array(target_dtype)
if isinstance(arr, NullArrayProxy)
else cast_to_common_type(arr, target_dtype)
for arr in to_concat
]
if isinstance(to_concat[0], ExtensionArray):
cls = type(to_concat[0])
return cls._concat_same_type(to_concat)
result = np.concatenate(to_concat)
# TODO decide on exact behaviour (we shouldn't do this only for empty result)
# see https://github.com/pandas-dev/pandas/issues/39817
if len(result) == 0:
# all empties -> check for bool to not coerce to float
kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
if len(kinds) != 1:
if "b" in kinds:
result = result.astype(object)
return result
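# For instance, concat_arrays([np.array([1, 2]), np.array([3.0])]) mixes int64 and
# float64, so a common dtype (float64) is found first and the result is
# array([1., 2., 3.]); any NullArrayProxy placeholders would be materialised to that
# dtype via to_array() before concatenation.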
def concatenate_managers(
mgrs_indexers, axes: list[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
unit = join_units[0]
blk = unit.block
if len(join_units) == 1 and not join_units[0].indexers:
values = blk.values
if copy:
values = values.copy()
else:
values = values.view()
fastpath = True
elif _is_uniform_join_units(join_units):
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals, axis=1)
values = ensure_block_shape(values, blk.ndim)
values = ensure_wrapped_if_datetimelike(values)
fastpath = blk.values.dtype == values.dtype
else:
values = _concatenate_join_units(join_units, concat_axis, copy=copy)
fastpath = False
if fastpath:
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block(values, placement=placement, ndim=len(axes))
blocks.append(b)
return BlockManager(tuple(blocks), axes)
def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: dict[int, np.ndarray]):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape , save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
has_column_indexer = False
if 0 in indexers:
has_column_indexer = True
ax0_indexer = indexers.pop(0)
blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)
else:
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
if blkno == -1:
# only reachable in the `0 in indexers` case
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
not has_column_indexer
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
# Note: block is None implies indexers is None, but not vice-versa
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
blk = self.block
if blk is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return blk.dtype
return ensure_dtype_can_hold_na(blk.dtype)
def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
"""
Check that we are all-NA of a type/dtype that is compatible with this dtype.
Augments `self.is_na` with an additional check of the type of NA values.
"""
if not self.is_na:
return False
if self.block is None:
return True
if self.dtype == object:
values = self.block.values
return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
na_value = self.block.fill_value
if na_value is NaT and not is_dtype_equal(self.dtype, dtype):
# e.g. we are dt64 and other is td64
# fill_values match but we should not cast self.block.values to dtype
# TODO: this will need updating if we ever have non-nano dt64/td64
return False
if na_value is NA and needs_i8_conversion(dtype):
# FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
# e.g. self.dtype == "Int64" and dtype is td64, we dont want
# to consider these as matching
return False
# TODO: better to use can_hold_element?
return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
if self.block is None:
return True
if not self.block._can_hold_na:
return False
values = self.block.values
if isinstance(self.block.values.dtype, SparseDtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
values_flat = values
else:
values_flat = values.ravel(order="K")
return isna_all(values_flat)
def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self._is_valid_na_for(empty_dtype):
# note: always holds when self.block is None
blk_dtype = getattr(self.block, "dtype", None)
if blk_dtype == np.dtype("object"):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if is_datetime64tz_dtype(empty_dtype):
i8values = np.full(self.shape, fill_value.value)
return DatetimeArray(i8values, dtype=empty_dtype)
from unittest import TestCase
import pandas as pd
import numpy as np
from skbio import OrdinationResults
from q2_convexhull.convexhull import convex_hull
from q2_convexhull.convexhull import validate
from pandas.testing import assert_frame_equal
from qiime2 import Metadata
class TestConvexHull(TestCase):
def setUp(self):
self.individual_id_column = 'unique_id'
self.number_of_dimensions = 3
index = pd.Index(
['i1', 'i2', 'i3', 'i4', 'i5', 'i6', 'i7', 'i8',
'x1', 'x2', 'x3', 'x4', 'x5', 'x6', 'x7', 'x8'],
name='sampleid')
samples_df = pd.DataFrame(
{'PC1': [0, 0, 0, 0, 1, 1, 1, 1,
3, 3, 3, 3, 4, 4, 4, 4],
'PC2': [0, 0, 1, 1, 0, 0, 1, 1,
3, 3, 4, 4, 3, 3, 4, 4],
'PC3': [0, 1, 0, 1, 0, 1, 0, 1,
3, 4, 3, 4, 3, 4, 3, 4]},
index=index)
proportion_explained = pd.Series(
[15.5, 12.2, 8.7],
index=['PC1', 'PC2', 'PC3'])
values = pd.Series(
np.array([0.7, 0.2, 0.1]),
index=['PC1', 'PC2', 'PC3'])
self.pcoa = OrdinationResults(
'PCoA',
'Principal Coordinate Analysis',
values,
samples_df,
proportion_explained=proportion_explained)
metadata = pd.DataFrame(
{self.individual_id_column:
['s1', 's1', 's1', 's1', 's1', 's1', 's1', 's1',
's2', 's2', 's2', 's2', 's2', 's2', 's2', 's2']},
index=index)
self.metadata = Metadata(metadata)
def test_squares(self):
hulls = convex_hull(self.metadata,
self.pcoa,
self.individual_id_column,
self.number_of_dimensions)
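# Each individual's eight samples sit at the corners of an axis-aligned cube of
# side 1, so the expected convex-hull volume is 1.0 and surface area 6.0.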
expected = pd.DataFrame(
{self.individual_id_column: ['s1', 's2'],
'convexhull_volume': [1.0, 1.0],
'convexhull_area': [6.0, 6.0]})
assert_frame_equal(hulls, expected)
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
output = ujson.encode(list_input)
assert list_input == json.loads(output)
assert list_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(list_input),
ujson.decode(output, numpy=True))
def test_encode_dict_conversion(self):
dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(dict_input)
assert dict_input == json.loads(output)
assert dict_input == ujson.decode(output)
@pytest.mark.parametrize("builtin_value", [None, True, False])
def test_encode_builtin_values_conversion(self, builtin_value):
output = ujson.encode(builtin_value)
assert builtin_value == json.loads(output)
assert output == json.dumps(builtin_value)
assert builtin_value == ujson.decode(output)
def test_encode_datetime_conversion(self):
datetime_input = datetime.datetime.fromtimestamp(time.time())
output = ujson.encode(datetime_input, date_unit="s")
expected = calendar.timegm(datetime_input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encode_date_conversion(self):
date_input = datetime.date.fromtimestamp(time.time())
output = ujson.encode(date_input, date_unit="s")
tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
@pytest.mark.parametrize("test", [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
])
def test_encode_time_conversion_basic(self, test):
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encode_time_conversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
@pytest.mark.parametrize("decoded_input", [
NaT,
np.datetime64("NaT"),
np.nan,
np.inf,
-np.inf
])
def test_encode_as_null(self, decoded_input):
assert ujson.encode(decoded_input) == "null", "Expected null"
def test_datetime_units(self):
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encode_to_utf8(self):
unencoded = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(unencoded, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(unencoded, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decode_from_unicode(self):
unicode_input = u("{\"obj\": 31337}")
dec1 = ujson.decode(unicode_input)
dec2 = ujson.decode(str(unicode_input))
assert dec1 == dec2
def test_encode_recursion_max(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
decoded_input = O1()
decoded_input.member = O2()
decoded_input.member.member = decoded_input
with pytest.raises(OverflowError):
ujson.encode(decoded_input)
def test_decode_jibberish(self):
jibberish = "fdsa sda v9sa fdsa"
with pytest.raises(ValueError):
ujson.decode(jibberish)
@pytest.mark.parametrize("broken_json", [
"[", # Broken array start.
"{", # Broken object start.
"]", # Broken array end.
"}", # Broken object end.
])
def test_decode_broken_json(self, broken_json):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("too_big_char", [
"[",
"{",
])
def test_decode_depth_too_big(self, too_big_char):
with pytest.raises(ValueError):
ujson.decode(too_big_char * (1024 * 1024))
@pytest.mark.parametrize("bad_string", [
"\"TESTING", # Unterminated.
"\"TESTING\\\"", # Unterminated escape.
"tru", # Broken True.
"fa", # Broken False.
"n", # Broken None.
])
def test_decode_bad_string(self, bad_string):
with pytest.raises(ValueError):
ujson.decode(bad_string)
@pytest.mark.parametrize("broken_json", [
'{{1337:""}}',
'{{"key":"}',
'[[[true',
])
def test_decode_broken_json_leak(self, broken_json):
for _ in range(1000):
with pytest.raises(ValueError):
ujson.decode(broken_json)
@pytest.mark.parametrize("invalid_dict", [
"{{{{31337}}}}", # No key.
"{{{{\"key\":}}}}", # No value.
"{{{{\"key\"}}}}", # No colon or value.
])
def test_decode_invalid_dict(self, invalid_dict):
with pytest.raises(ValueError):
ujson.decode(invalid_dict)
@pytest.mark.parametrize("numeric_int_as_str", [
"31337", "-31337" # Should work with negatives.
])
def test_decode_numeric_int(self, numeric_int_as_str):
assert int(numeric_int_as_str) == ujson.decode(numeric_int_as_str)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encode_unicode_4bytes_utf8_fail(self):
with pytest.raises(OverflowError):
ujson.encode("\xfd\xbf\xbf\xbf\xbf\xbf")
def test_encode_null_character(self):
wrapped_input = "31337 \x00 1337"
output = ujson.encode(wrapped_input)
assert wrapped_input == json.loads(output)
assert output == json.dumps(wrapped_input)
assert wrapped_input == ujson.decode(output)
alone_input = "\x00"
output = ujson.encode(alone_input)
assert alone_input == json.loads(output)
assert output == json.dumps(alone_input)
assert alone_input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
def test_decode_null_character(self):
wrapped_input = "\"31337 \\u0000 31337\""
assert ujson.decode(wrapped_input) == json.loads(wrapped_input)
def test_encode_list_long_conversion(self):
long_input = [9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807]
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert long_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(long_input),
ujson.decode(output, numpy=True,
dtype=np.int64))
def test_encode_long_conversion(self):
long_input = 9223372036854775807
output = ujson.encode(long_input)
assert long_input == json.loads(output)
assert output == json.dumps(long_input)
assert long_input == ujson.decode(output)
@pytest.mark.parametrize("int_exp", [
"1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"
])
def test_decode_numeric_int_exp(self, int_exp):
assert ujson.decode(int_exp) == json.loads(int_exp)
def test_dump_to_file(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dump_to_file_like(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, data_bytes):
self.bytes += data_bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dump_file_args_error(self):
with pytest.raises(TypeError):
ujson.dump([], "")
def test_load_file(self):
data = "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = StringIO(data)
assert exp_data == ujson.load(f)
f = StringIO(data)
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_like(self):
class FileLike(object):
def read(self):
try:
self.end
except AttributeError:
self.end = True
return "[1,2,3,4]"
exp_data = [1, 2, 3, 4]
f = FileLike()
assert exp_data == ujson.load(f)
f = FileLike()
tm.assert_numpy_array_equal(np.array(exp_data),
ujson.load(f, numpy=True))
def test_load_file_args_error(self):
with pytest.raises(TypeError):
ujson.load("[]")
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encode_numeric_overflow(self):
with pytest.raises(OverflowError):
ujson.encode(12839128391289382193812939)
def test_encode_numeric_overflow_nested(self):
class Nested(object):
x = 12839128391289382193812939
for _ in range(0, 100):
with pytest.raises(OverflowError):
ujson.encode(Nested())
@pytest.mark.parametrize("val", [
3590016419, 2**31, 2**32, (2**32) - 1
])
def test_decode_number_with_32bit_sign_bit(self, val):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
doc = '{{"id": {val}}}'.format(val=val)
assert ujson.decode(doc)["id"] == val
def test_encode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
escape_input = base * 1024 * 1024 * 2
ujson.encode(escape_input)
def test_decode_big_escape(self):
# Make sure no Exception is raised.
for _ in range(10):
base = '\u00e5'.encode("utf-8") if compat.PY3 else "\xc3\xa5"
quote = compat.str_to_bytes("\"")
escape_input = quote + (base * 1024 * 1024 * 2) + quote
ujson.decode(escape_input)
def test_to_dict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
def test_default_handler(self):
class _TestObject(object):
def __init__(self, val):
self.val = val
@property
def recursive_attr(self):
return _TestObject("recursive_attr")
def __str__(self):
return str(self.val)
pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
assert '"foo"' == ujson.encode(_TestObject("foo"),
default_handler=str)
def my_handler(_):
return "foobar"
assert '"foobar"' == ujson.encode(_TestObject("foo"),
default_handler=my_handler)
def my_handler_raises(_):
raise TypeError("I raise for anything")
with pytest.raises(TypeError, match="I raise for anything"):
ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
def my_int_handler(_):
return 42
assert ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_int_handler)) == 42
def my_obj_handler(_):
return datetime.datetime(2013, 2, 3)
assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
ujson.decode(ujson.encode(_TestObject("foo"),
default_handler=my_obj_handler)))
obj_list = [_TestObject("foo"), _TestObject("bar")]
assert (json.loads(json.dumps(obj_list, default=str)) ==
ujson.decode(ujson.encode(obj_list, default_handler=str)))
class TestNumpyJSONTests(object):
@pytest.mark.parametrize("bool_input", [True, False])
def test_bool(self, bool_input):
b = np.bool(bool_input)
assert ujson.decode(ujson.encode(b)) == b
def test_bool_array(self):
bool_array = np.array([
True, False, True, True,
False, True, False, False], dtype=np.bool)
output = np.array(ujson.decode(
ujson.encode(bool_array)), dtype=np.bool)
tm.assert_numpy_array_equal(bool_array, output)
def test_int(self, any_int_dtype):
klass = np.dtype(any_int_dtype).type
num = klass(1)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_int_array(self, any_int_dtype):
arr = np.arange(100, dtype=np.int)
arr_input = arr.astype(any_int_dtype)
arr_output = np.array(ujson.decode(ujson.encode(arr_input)),
dtype=any_int_dtype)
tm.assert_numpy_array_equal(arr_input, arr_output)
def test_int_max(self, any_int_dtype):
if any_int_dtype in ("int64", "uint64") and compat.is_platform_32bit():
pytest.skip("Cannot test 64-bit integer on 32-bit platform")
klass = np.dtype(any_int_dtype).type
# uint64 max will always overflow,
# as it's encoded to signed.
if any_int_dtype == "uint64":
num = np.iinfo("int64").max
else:
num = np.iinfo(any_int_dtype).max
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(256.2013)
assert klass(ujson.decode(ujson.encode(num))) == num
def test_float_array(self, float_dtype):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
float_input = arr.astype(float_dtype)
float_output = np.array(ujson.decode(
ujson.encode(float_input, double_precision=15)),
dtype=float_dtype)
tm.assert_almost_equal(float_input, float_output)
def test_float_max(self, float_dtype):
klass = np.dtype(float_dtype).type
num = klass(np.finfo(float_dtype).max / 10)
tm.assert_almost_equal(klass(ujson.decode(
ujson.encode(num, double_precision=15))), num)
def test_array_basic(self):
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
@pytest.mark.parametrize("shape", [
(10, 10),
(5, 5, 4),
(100, 1),
])
def test_array_reshaped(self, shape):
arr = np.arange(100)
arr = arr.reshape(shape)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
def test_array_list(self):
arr_list = ["a", list(), dict(), dict(), list(),
42, 97.8, ["a", "b"], {"key": "val"}]
arr = np.array(arr_list)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
def test_array_float(self):
dtype = np.float32
arr = np.arange(100.202, 200.202, 1, dtype=dtype)
arr = arr.reshape((5, 5, 4))
arr_out = np.array(ujson.decode(ujson.encode(arr)), dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
arr_out = ujson.decode(ujson.encode(arr), numpy=True, dtype=dtype)
tm.assert_almost_equal(arr, arr_out)
def test_0d_array(self):
with pytest.raises(TypeError):
ujson.encode(np.array(1))
@pytest.mark.parametrize("bad_input,exc_type,kwargs", [
([{}, []], ValueError, {}),
([42, None], TypeError, {}),
([["a"], 42], ValueError, {}),
([42, {}, "a"], TypeError, {}),
([42, ["a"], 42], ValueError, {}),
(["a", "b", [], "c"], ValueError, {}),
([{"a": "b"}], ValueError, dict(labelled=True)),
({"a": {"b": {"c": 42}}}, ValueError, dict(labelled=True)),
([{"a": 42, "b": 23}, {"c": 17}], ValueError, dict(labelled=True))
])
def test_array_numpy_except(self, bad_input, exc_type, kwargs):
with pytest.raises(exc_type):
ujson.decode(ujson.dumps(bad_input), numpy=True, **kwargs)
def test_array_numpy_labelled(self):
labelled_input = {"a": []}
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(["a"]) == output[1]).all()
assert output[2] is None
labelled_input = [{"a": 42}]
output = ujson.loads(ujson.dumps(labelled_input),
numpy=True, labelled=True)
assert (np.array([u("a")]) == output[2]).all()
assert (np.array([42]) == output[0]).all()
assert output[1] is None
# see gh-10837: write out the dump explicitly
# so there is no dependency on iteration order
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert output[1] is None
assert (np.array([u("a"), "b"]) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expected_vals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expected_vals == output[0]).all()
assert (np.array(["1", "2", "3"]) == output[1]).all()
assert (np.array(["a", "b"]) == output[2]).all()
class TestPandasJSONTests(object):
def test_dataframe(self, orient, numpy):
if orient == "records" and numpy:
pytest.skip("Not idiomatic pandas")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(df, **encode_kwargs),
**decode_kwargs)
# Ensure proper DataFrame initialization.
if orient == "split":
dec = _clean_dict(output)
output = DataFrame(**dec)
else:
output = DataFrame(output)
# Corrections to enable DataFrame comparison.
if orient == "values":
df.columns = [0, 1, 2]
df.index = [0, 1]
elif orient == "records":
df.index = [0, 1]
elif orient == "index":
df = df.transpose()
tm.assert_frame_equal(output, df, check_dtype=False)
def test_dataframe_nested(self, orient):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"])
nested = {"df1": df, "df2": df.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"df1": ujson.decode(ujson.encode(df, **kwargs)),
"df2": ujson.decode(ujson.encode(df, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_dataframe_numpy_labelled(self, orient):
if orient in ("split", "values"):
pytest.skip("Incompatible with labelled=True")
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
"a", "b"], columns=["x", "y", "z"], dtype=np.int)
kwargs = {} if orient is None else dict(orient=orient)
output = DataFrame(*ujson.decode(ujson.encode(df, **kwargs),
numpy=True, labelled=True))
if orient is None:
df = df.T
elif orient == "records":
df.index = [0, 1]
tm.assert_frame_equal(output, df)
def test_series(self, orient, numpy):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
encode_kwargs = {} if orient is None else dict(orient=orient)
decode_kwargs = {} if numpy is None else dict(numpy=numpy)
output = ujson.decode(ujson.encode(s, **encode_kwargs),
**decode_kwargs)
if orient == "split":
dec = _clean_dict(output)
output = Series(**dec)
else:
output = Series(output)
if orient in (None, "index"):
s.name = None
output = output.sort_values()
s.index = ["6", "7", "8", "9", "10", "15"]
elif orient in ("records", "values"):
s.name = None
s.index = [0, 1, 2, 3, 4, 5]
tm.assert_series_equal(output, s, check_dtype=False)
def test_series_nested(self, orient):
s = Series([10, 20, 30, 40, 50, 60], name="series",
index=[6, 7, 8, 9, 10, 15]).sort_values()
nested = {"s1": s, "s2": s.copy()}
kwargs = {} if orient is None else dict(orient=orient)
exp = {"s1": ujson.decode(ujson.encode(s, **kwargs)),
"s2": ujson.decode(ujson.encode(s, **kwargs))}
assert ujson.decode(ujson.encode(nested, **kwargs)) == exp
def test_index(self):
i = Index([23, 45, 18, 98, 43, 11], name="index")
# Column indexed.
output = Index(ujson.decode(ujson.encode(i)), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i), numpy=True), name="index")
tm.assert_index_equal(i, output)
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
numpy=True))
output = Index(**dec)
tm.assert_index_equal(i, output)
assert i.name == output.name
output = Index(ujson.decode(ujson.encode(i, orient="values")),
name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="values"),
numpy=True), name="index")
tm.assert_index_equal(i, output)
output = Index(ujson.decode(ujson.encode(i, orient="records")), name="index")
from hyperopt import hp
import pandas as pd
import numpy as np
from pyFTS.models import hofts
from pyFTS.models.multivariate import granular
from pyFTS.partitioners import Grid, Entropy
from pyFTS.models.multivariate import variable
from pyFTS.common import Membership
from spatiotemporal.models.clusteredmvfts.fts import evolvingclusterfts
import warnings
warnings.filterwarnings("ignore")
############# High Order FTS ##############
hofts_space = {'partitioner': hp.choice('partitioner', [Grid.GridPartitioner, Entropy.EntropyPartitioner]),
'npartitions': hp.choice('npartitions', [10, 50,100]),
'order': hp.choice('order', [1,2]),
'input': hp.choice('input', ['DH3']),
'output': hp.choice('output', ['DH3'])}
def hofts_forecast(train_df, test_df, params):
_partitioner = params['partitioner']
_npartitions = params['npartitions']
_order = params['order']
_input = params['input']
_step = params.get('step',1)
fuzzy_sets = _partitioner(data=train_df[_input].values, npart=_npartitions)
model = hofts.HighOrderFTS(order=_order)
model.fit(train_df[_input].values, order=_order, partitioner=fuzzy_sets)
forecast = model.predict(test_df[_input].values, steps_ahead=_step)
return forecast
############# High Order FTS ##############
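# Illustrative call (a sketch; assumes train_df/test_df are DataFrames containing a
# 'DH3' column, matching the search space above):
#   params = {'partitioner': Grid.GridPartitioner, 'npartitions': 50, 'order': 2,
#             'input': 'DH3', 'output': 'DH3'}
#   yhat = hofts_forecast(train_df, test_df, params)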
############# Vector Auto Regressive ##############
from statsmodels.tsa.api import VAR
var_space = {
'order': hp.choice('order', [1,2, 4, 8]),
'input': hp.choice('input', [['DH3', 'DH4','DH5','DH10','DH11','DH9','DH2', 'DH6','DH7','DH8']]),
'output': hp.choice('output', ['DH3'])}
def var_forecast(train_df, test_df, params):
_order = params['order']
_input = list(params['input'])
_output = params['output']
_step = params.get('step',1)
model = VAR(train_df[_input].values)
results = model.fit(_order)
lag_order = results.k_ar
params['order'] = lag_order
forecast = []
for i in np.arange(0,len(test_df)-lag_order-_step+1):
fcst = results.forecast(test_df[_input].values[i:i+lag_order],_step)
forecast.append(fcst[-1])
forecast_df = pd.DataFrame(columns=test_df[_input].columns, data=forecast)
return forecast_df[_output].values
############# Vector Auto Regressive ##############
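# Illustrative call (a sketch; 'input' lists the columns used to fit the VAR and
# 'output' selects the series whose forecasts are returned):
#   params = {'order': 2, 'input': ('DH3', 'DH4', 'DH5'), 'output': 'DH3', 'step': 1}
#   yhat = var_forecast(train_df, test_df, params)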
############# MultiLayer Perceptron ##############
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.metrics import mean_squared_error
mlp_space = {'choice':
hp.choice('num_layers',
[
{'layers': 'two',
},
{'layers': 'three',
'units3': hp.choice('units3', [8, 16, 64, 128, 256, 512]),
'dropout3': hp.choice('dropout3', [0, 0.25, 0.5, 0.75])
}
]),
'units1': hp.choice('units1', [8, 16, 64, 128, 256, 512]),
'units2': hp.choice('units2', [8, 16, 64, 128, 256, 512]),
'dropout1': hp.choice('dropout1', [0, 0.25, 0.5, 0.75]),
'dropout2': hp.choice('dropout2', [0, 0.25, 0.5, 0.75]),
'batch_size': hp.choice('batch_size', [28, 64, 128, 256, 512]),
'order': hp.choice('order', [1, 2, 3]),
'input': hp.choice('input', [['DH4','DH5','DH6']]),
'output': hp.choice('output', ['DH4']),
'epochs': hp.choice('epochs', [100, 200, 300])}
# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = | pd.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import os
import sys
import pandas as pd
import numpy as np
# In[3]:
# In[4]:
#file 불러오기
#filepath = sys.argv[1]
#filename = sys.argv[2]
filepath = "/home/data/projects/rda/workspace/rda/files/"
filename = "input3.csv"
data = pd.read_csv(filepath + "/" + filename, encoding='UTF-8')
# In[ ]:
#사용자 지정 parameter
#kmeans
'''
k_clusters = int(sys.argv[3])
k_iter = int(sys.argv[4])
#dbscan
eps = float(sys.argv[5])
min_samples = int(sys.argv[6])
#hierarchy
h_clusters = int(sys.argv[7])
'''
# In[ ]:
k_clusters = 5
k_iter = 300
#dbscan
eps = 0.5
min_samples =3
#hierarchy
h_clusters = 3
#모든 feature에 대해 결측치 갖는 샘플 제거
data_0 =data.dropna(axis=0,how='all')
print(data_0.shape)
#label 값이 결측치인 샘플 제거
data_l =data.loc[data["label"].notnull(), :]
print(data_l.shape)
#50%이상이 결측치인 feature 삭제
data_f =data_l.dropna(axis=1,thresh=data_l.shape[0]/2)
print(data_f.shape)
#나머지는 각 label에 대해서 median imputation 수행
data_na_remove = data_f.fillna(data_f.mean())
print(data_na_remove.shape)
data_na_remove
# In[17]:
print(data_na_remove.shape)
data = data_na_remove.iloc[:100,:5]
X = data_na_remove.iloc[:100,1:5]
Y = data_na_remove.iloc[:100,0] #임의의
data_na_remove["label"].unique()
# In[ ]:
from sklearn.cluster import KMeans, DBSCAN ,AgglomerativeClustering
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics import silhouette_score
ari =[]
nmi =[]
silhouette =[]
#kmeans
kmeans = KMeans(n_clusters= k_clusters,max_iter=k_iter).fit(X)
predict_k = pd.DataFrame(kmeans.predict(X))
predict_k.columns=['predict_kmeans']
#concat
data_k = pd.concat([data,predict_k],axis=1)
#scores
ari.append(adjusted_rand_score(Y,kmeans.predict(X)))
nmi.append(normalized_mutual_info_score(Y,kmeans.predict(X)))
silhouette.append(silhouette_score(X,kmeans.predict(X)))
#dbscan
dbscan = DBSCAN(eps= eps,min_samples= min_samples)
predict_db = pd.DataFrame(dbscan.fit_predict(X))
predict_db.columns=['predict_dbscan']
# concat
data_d = pd.concat([data_k,predict_db],axis=1)
#scores
ari.append(adjusted_rand_score(Y,dbscan.fit_predict(X)))
nmi.append(normalized_mutual_info_score(Y,dbscan.fit_predict(X)))
silhouette.append(silhouette_score(X,dbscan.fit_predict(X)))
# hierarchy
hierarchy = AgglomerativeClustering(n_clusters= h_clusters)
predict_h = pd.DataFrame(hierarchy.fit_predict(X))
predict_h.columns=['predict_hierarchy']
#concat
data_h = pd.concat([data_d,predict_h],axis=1)
#scores
ari.append(adjusted_rand_score(Y,hierarchy.fit_predict(X)))
nmi.append(normalized_mutual_info_score(Y,hierarchy.fit_predict(X)))
silhouette.append(silhouette_score(X,hierarchy.fit_predict(X)))
#data save
#data_h.to_csv('./public/files/cluster_data2_' + filename + '_.csv')
#data_h.to_csv('./cluster_data2_' + filename + '_.csv', mode = "w",encoding='cp949')
#clustering score save
score = pd.concat([pd.Series(ari), | pd.Series(nmi) | pandas.Series |
# Essentials
import pandas as pd
import numpy as np
# Plots
import matplotlib.pyplot as plt
from tqdm import tqdm
# Models
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
import xgboost as xgb
# Misc
from rdkit import Chem
from sklearn.model_selection import GridSearchCV, cross_validate, RandomizedSearchCV, StratifiedKFold
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import classification_report, confusion_matrix, precision_score, recall_score, f1_score, \
roc_auc_score, precision_recall_curve, average_precision_score
from imblearn.pipeline import make_pipeline
from imblearn.over_sampling import SMOTENC
from collections import Counter
import re, requests
# Functions
import create_fingerprints as cf
import create_descriptors as cd
def create_original_df(usedf=False, file=None, write_s=False, write_off=False):
# Create dataframe from csv
if not usedf:
df = pd.read_csv("./datasets/sider.csv")
else:
df = file.copy()
# Extract SMILES column
df_molecules = pd.DataFrame(df["smiles"])
# Converting to molecules
df_molecules["mols"] = df_molecules["smiles"].apply(Chem.MolFromSmiles)
# Droping mols and smiles
df_y = df.drop("smiles", axis=1)
# Write to csv
if write_s:
df_molecules.to_csv("./dataframes/df_molecules.csv")
df_y.to_csv("./dataframes/df_y.csv")
if write_off:
df_molecules.to_csv("./dataframes/df_off_mols.csv")
df_y.to_csv("./dataframes/df_off_y.csv")
return df_y, df_molecules
def createfingerprints(df_mols, length):
# Morgan Fingerprint (ECFP4)
ecfp_df = cf.create_ecfp4_fingerprint(df_mols, length, False)
# MACCS keys (always 167)
maccs_df = cf.create_maccs_fingerprint(df_mols, False)
# ATOM PAIRS
atom_pairs_df = cf.create_atompairs_fingerprint(df_mols, length, False)
# Topological torsion
tt_df = cf.create_topological_torsion_fingerprint(df_mols, length, False)
return ecfp_df, maccs_df, atom_pairs_df, tt_df
def createdescriptors(df_molecules):
# Descriptors
df_mols_desc = cd.calc_descriptors(df_molecules, False)
return df_mols_desc
def test_fingerprint_size(df_mols, df_y, model, colname="Hepatobiliary disorders", num_sizes_to_test=20, min_size=100,
max_size=2048, cv=10, makeplots=False, write=False):
# Fingerprint length type and selection
# Scoring metrics to use
scoring_metrics = ("f1_micro", "f1_macro", "f1", "roc_auc", "recall", "precision", "average_precision")
sizes = np.linspace(min_size, max_size, num_sizes_to_test, dtype=int)
# Create results dataframes for each metric
results_f1 = np.zeros([4, len(sizes)])
results_rocauc = np.zeros([4, len(sizes)])
results_precision = np.zeros([4, len(sizes)])
results_recall = np.zeros([4, len(sizes)])
results_average_precision = np.zeros([4, len(sizes)])
results_f1_micro = np.zeros([4, len(sizes)])
results_f1_macro = np.zeros([4, len(sizes)])
# Get test sizes
c = 0
# Size testing using SVC with scale gamma (1 / (n_features * X.var()))
for s in tqdm(sizes):
# Create fingerprint with size S
fingerprints = createfingerprints(df_mols, int(s))
r = 0
for fp in fingerprints:
X = fp.copy()
# Using "Hepatobiliary disorders" as an results example since its balanced
y = df_y[colname].copy()
# 10-fold cross validation
cv_scores = cross_validate(model, X, y, cv=cv, scoring=scoring_metrics, return_train_score=False, n_jobs=-1)
for k, v in cv_scores.items():
if k == "test_roc_auc":
results_rocauc[r, c] = v.mean()
if k == "test_precision":
results_precision[r, c] = v.mean()
if k == "test_recall":
results_recall[r, c] = v.mean()
if k == "test_average_precision":
results_average_precision[r, c] = v.mean()
if k == "test_f1":
results_f1[r, c] = v.mean()
if k == "test_f1_micro":
results_f1_micro[r, c] = v.mean()
if k == "test_f1_macro":
results_f1_macro[r, c] = v.mean()
r += 1
c += 1
all_results = (results_rocauc, results_precision, results_recall, results_average_precision, results_f1,
results_f1_micro, results_f1_macro)
# Create dataframe for results
df_results_rocauc_size_SVC = pd.DataFrame(results_rocauc, columns=sizes)
df_results_precision_size_SVC = pd.DataFrame(results_precision, columns=sizes)
df_results_recall_size_SVC = pd.DataFrame(results_recall, columns=sizes)
df_results_av_prec_size_SVC = | pd.DataFrame(results_average_precision, columns=sizes) | pandas.DataFrame |
import locale
import numpy as np
import pytest
from pandas.compat import (
is_platform_windows,
np_version_under1p19,
)
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
from pandas.core.arrays.floating import (
Float32Dtype,
Float64Dtype,
)
def test_uses_pandas_na():
a = pd.array([1, None], dtype=Float64Dtype())
assert a[1] is pd.NA
def test_floating_array_constructor():
values = np.array([1, 2, 3, 4], dtype="float64")
mask = np.array([False, False, False, True], dtype="bool")
result = FloatingArray(values, mask)
expected = pd.array([1, 2, 3, np.nan], dtype="Float64")
tm.assert_extension_array_equal(result, expected)
tm.assert_numpy_array_equal(result._data, values)
tm.assert_numpy_array_equal(result._mask, mask)
msg = r".* should be .* numpy array. Use the 'pd.array' function instead"
with pytest.raises(TypeError, match=msg):
FloatingArray(values.tolist(), mask)
with pytest.raises(TypeError, match=msg):
FloatingArray(values, mask.tolist())
with pytest.raises(TypeError, match=msg):
FloatingArray(values.astype(int), mask)
msg = r"__init__\(\) missing 1 required positional argument: 'mask'"
with pytest.raises(TypeError, match=msg):
FloatingArray(values)
def test_floating_array_disallows_float16(request):
# GH#44715
arr = np.array([1, 2], dtype=np.float16)
mask = np.array([False, False])
msg = "FloatingArray does not support np.float16 dtype"
with pytest.raises(TypeError, match=msg):
| FloatingArray(arr, mask) | pandas.core.arrays.FloatingArray |
#!/usr/bin/env python
import argparse
import csv
import json
import sys
import time
from confluent_kafka import Producer
import socket
from newsapi import NewsApiClient
import http.client
import urllib.parse
import pandas as pd
import numpy as np
offset = 0
def acked(err, msg):
if err is not None:
print("Failed to deliver message: %s: %s" %
(str(msg.value()), str(err)))
else:
print("Message produced: %s" % (str(msg.value())))
def main():
counter = 0
current_article_count = None
parser = argparse.ArgumentParser(description=__doc__)
# parser.add_argument('filename', type=str,
# help='Time series csv file.')
parser.add_argument('topic', type=str,
help='Name of the Kafka topic to stream.')
# parser.add_argument('--speed', type=float, default=1, required=False,
# help='Speed up time series by a given multiplicative factor.')
args = parser.parse_args()
topic = args.topic
p_key1 = "newsapi"
p_key2 = "mediastack"
conf = {'bootstrap.servers': "localhost:9092",
'client.id': socket.gethostname()}
producer = Producer(conf)
running = True
while running:
try:
# mediastack news
data = combine_cat_data()
print("API time break.....")
for article in data:
payload = {
'title': article["title"],
"category": article["category"],
"description": article["description"]
}
payload = json.dumps(payload)
producer.produce(topic=topic, key=p_key2,
value=payload, callback=acked)
producer.flush()
time.sleep(90) # temp change
except Exception as e:
if e == TypeError:
sys.exit()
else:
print(e)
def get_mediastack():
global offset
conn = http.client.HTTPConnection('api.mediastack.com')
params = urllib.parse.urlencode({
'access_key': '85b48d9edcb0a2a1d38c7e0ac0eb8919', # ysusheen api key
# 'categories': '-general,-sports,-bussiness,-entertainment,-health,-science,-technology',
'sort': 'published_desc',
'language': "en,-ar,-de,-es,-fr,-he,-it,-nl,-no,-pt,-ru,-se,-zh",
'limit': 100,
})
conn.request('GET', '/v1/news?{}'.format(params))
res = conn.getresponse()
data = res.read().decode("utf-8")
data = json.loads(data)
articles = data["data"]
articles = list(
filter(
lambda article: True if article["language"] == 'en' else False, articles))
return articles
def get_balanced_mediastack(category, offset=0):
conn = http.client.HTTPConnection('api.mediastack.com')
params = urllib.parse.urlencode({
'access_key': '85b48d9edcb0a2a1d38c7e0ac0eb8919',
'sort': 'published_desc',
'language': "en,-ar,-de,-es,-fr,-he,-it,-nl,-no,-pt,-ru,-se,-zh",
'categories': category,
'offset': offset,
'limit': 50,
})
try:
conn.request('GET', '/v1/news?{}'.format(params))
res = conn.getresponse()
data = res.read().decode("utf-8")
data = json.loads(data)
articles = data["data"]
articles = list(
filter(
lambda article: True if article["language"] == 'en' else False, articles))
articles = map(lambda article: {
'title': article["title"],
"category": article["category"],
"description": article["description"]
}, articles)
return | pd.DataFrame(articles) | pandas.DataFrame |
from IPython import embed
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
import pandas as pd
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None
"""
try:
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None
def is_good_response(resp):
"""
Returns true if the response seems to be Markdown, false otherwise
"""
content_type = resp.headers['Content-Type'].lower()
return (resp.status_code == 200
and content_type is not None
and content_type.find('text/plain') > -1)
def log_error(e):
"""
It is always a good idea to log errors.
This function just prints them, but you can
make it do anything.
"""
print(e)
def read_markdown(raw_md):
""" Reads the raw MarkDown and return the table and callback strings """
read_table = False
read_callback = False
data = []
data_callback = []
for line in raw_md.split(b'\n'):
line = line.decode('UTF-8')
if line == '| Option | Data-Attr | Defaults | Type | Description |':
read_table = True
if read_table:
data.append([col.strip().strip('`') for col in line.split('|') if col])
if line == '## Description of data passed to callbacks (onChange and etc.)':
read_table=False
if line == '## Description of data passed to callbacks (onChange and etc.)':
read_callback = True
if read_callback:
data_callback.append(line)
if line == '## Creating slider (all params)':
read_callback = False
data = [row for row in data[:-1] if row]
data_callback = [row for row in data_callback[4:-4] if row]
return data, data_callback
def split_callback_line(line):
split_1 = line.split(':')
name = split_1[0].strip().strip('"')
split_2 = split_1[1].split('//')
val = split_2[0].strip().rstrip(',')
comment = split_2[1].strip()
return (name, val, comment)
def data_callback_to_frame(data_callback):
""" Converts the split callback lines to a pd.DataFrame """
data = []
for line in data_callback:
name, val, comment = split_callback_line(line)
valtype = type(eval(val))
if valtype == str:
typetype = 'string'
elif valtype == int or valtype == float:
typetype = 'number'
elif valtype == type:
typetype = 'JQuery'
else:
raise Exception("Could not parse '{!s}'".format(name))
data.append((name, typetype, comment))
#fmt = ' {!s}: {!s} {!s}'
#row_fmt = fmt.format(name, typetype, comment)
df = | pd.DataFrame(data, columns=['Name', 'Type', 'Description']) | pandas.DataFrame |
"""Utility functions shared across the Aquarius project."""
import ftplib
import os
import logging
import gzip
import numpy as np
import pandas as pd
import yaml
import json
from datetime import timedelta, date, datetime
from dfply import (
X,
group_by,
summarize,
mask,
n,
transmute,
select,
left_join,
ungroup,
arrange,
mutate,
)
from tqdm import tqdm
from typing import Tuple, Dict, List, Optional, NamedTuple
from geopy.distance import geodesic
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
from bokeh.plotting import figure
from bokeh.models import HoverTool, ColumnDataSource, VArea, Line, VBar
from bokeh.tile_providers import get_provider, STAMEN_TERRAIN
class ECDF:
"""Empirical Cumulative Distribution Function with linear interpolation."""
def __init__(self):
self.x_values = None
self.cdf_values = None
def fit(self, xdata: np.ndarray, weights: Optional[np.ndarray] = None):
if weights is None:
ind_valid = ~np.isnan(xdata)
xv = xdata[ind_valid]
values, counts = np.unique(xv, return_counts=True)
sort_index = np.argsort(values)
self.x_values = values[sort_index]
self.cdf_values = (np.cumsum(counts[sort_index]) - 0.5)/np.sum(counts)
else:
assert len(xdata) == len(weights)
ind_valid = ~np.isnan(xdata) & ~np.isnan(weights)
xv = xdata[ind_valid]
wv = weights[ind_valid]
sorter = np.argsort(xv)
values = xv[sorter]
sample_weight = wv[sorter]
weighted_quantiles = (np.cumsum(sample_weight) - 0.5 * sample_weight) / np.sum(sample_weight)
unique_values, unique_index, unique_counts = np.unique(values, return_index=True, return_counts=True)
self.x_values = unique_values
self.cdf_values = weighted_quantiles[unique_index + unique_counts - 1] # last index instead of first index
return self
def eval(self, x: np.ndarray):
cdf = np.interp(x, xp=self.x_values, fp=self.cdf_values, left=0, right=1)
return cdf
def quantile(self, q: np.ndarray):
assert np.all(q >= 0) and np.all(q <= 1), 'quantiles should be in [0, 1]'
xq = np.interp(q, xp=self.cdf_values, fp=self.x_values, left=self.x_values[0], right=self.x_values[-1])
return xq
def download_ghcn_file(ftp_filename: str, save_dir: str):
logging.debug(f"ftp_filename={ftp_filename}")
logging.debug(f"save_dir={save_dir}")
ftp = ftplib.FTP(host='ftp.ncdc.noaa.gov', timeout=10.0, user='anonymous', passwd='<PASSWORD>')
logging.debug("FTP server connected")
ftp.cwd("/pub/data/ghcn/daily/by_year/")
save_path = os.path.join(save_dir, ftp_filename)
logging.debug(f"downloading {save_path}")
with open(save_path, 'wb') as file:
ftp.retrbinary(f"RETR {ftp_filename}", file.write)
logging.debug(f"downloaded {save_path}")
return 1
def unzip_file(filename: str, folder: str):
read_path = os.path.join(folder, filename)
logging.debug(f"unzipping {read_path}")
f = gzip.open(read_path, 'rb')
file_content = f.read()
f.close()
logging.debug(f"unzipped {read_path}")
return file_content
def df_file(filename: str, folder: str) -> pd.DataFrame:
# based on https://stackoverflow.com/questions/31028815/how-to-unzip-gz-file-using-python
read_path = os.path.join(folder, filename)
logging.debug(f"unzipping and reading {read_path}")
with gzip.open(read_path, 'rb') as f:
df = pd.read_csv(
f,
header=None,
names=['station', 'dateto', 'element', 'value', 'm_flag', 'q_flag', 's_flag', 'obs_time'],
parse_dates=['dateto'],
)
logging.debug(f"read {read_path}")
return df
def get_config():
with open('config.yaml', 'r') as file:
config = yaml.safe_load(file)
return config
def load_all_years(year_from: int, year_to: int, save_dir: str):
for year in range(year_from, year_to + 1):
filename = f"{year}.csv.gz"
download_ghcn_file(filename, save_dir)
logging.debug("completed")
def extract_one_prcp(filename: str, by_year_path: str, prcp_path: str):
df = df_file(filename, by_year_path)
df_sel = df >> mask(X.element == 'PRCP') >> transmute(station=X.station, dateto=X.dateto, prcp=X.value)
logging.debug(f"{df_sel.shape[0]} out of {df.shape[0]} rows selected")
year_string = filename.split('.')[0]
df_sel.to_csv(os.path.join(prcp_path, f"{year_string}.csv"), sep=',', index=False)
logging.debug(f"{filename} processed")
def extract_one_station_prcp(station: str, filename: str, by_year_path: str, prcp_path: str):
df = df_file(filename, by_year_path)
df_sel = df >> mask(X.element == 'PRCP') >> mask(X.station == station) >> \
transmute(station=X.station, dateto=X.dateto, prcp=X.value)
logging.debug(f"{df_sel.shape[0]} out of {df.shape[0]} rows selected")
year_string = filename.split('.')[0]
df_sel.to_csv(os.path.join(prcp_path, f"{year_string}.csv"), sep=',', index=False)
logging.debug(f"{filename} processed")
def extract_one_station_startswith(station_startswith: str, filename: str, by_year_path: str, prcp_path: str):
df = df_file(filename, by_year_path)
df_sel = df >> mask(X.element == 'PRCP') >> mask(X.station.str.startswith(station_startswith)) >> \
transmute(station=X.station, dateto=X.dateto, prcp=X.value)
logging.debug(f"{df_sel.shape[0]} out of {df.shape[0]} rows selected")
year_string = filename.split('.')[0]
df_sel.to_csv(os.path.join(prcp_path, f"{year_string}.csv"), sep=',', index=False)
logging.debug(f"{filename} processed")
def extract_all_prcp(by_year_path: str, prcp_path: str):
if not os.path.isdir(prcp_path):
os.makedirs(prcp_path)
for filename in sorted(os.listdir(by_year_path), reverse=True):
extract_one_prcp(filename, by_year_path, prcp_path)
return 1
def extract_all_prcp_station(station: str, by_year_path: str, prcp_path: str):
if not os.path.isdir(prcp_path):
os.makedirs(prcp_path)
for filename in sorted(os.listdir(by_year_path), reverse=True):
extract_one_station_prcp(station, filename, by_year_path, prcp_path)
return 1
def extract_all_prcp_station_startswith(station_startswith: str, by_year_path: str, prcp_path: str):
if not os.path.isdir(prcp_path):
os.makedirs(prcp_path)
for filename in sorted(os.listdir(by_year_path), reverse=True):
extract_one_station_startswith(station_startswith, filename, by_year_path, prcp_path)
return 1
def ded(prcp: pd.DataFrame) -> date:
logging.debug(f"{prcp.shape[0]} station*days")
station_ded = prcp >> group_by(X.station) >> summarize(ded=X.dateto.max())
logging.debug(f"{station_ded.shape[0]} stations")
data_end_dt = station_ded['ded'].quantile(0.90)
data_end_date = date(data_end_dt.year, data_end_dt.month, data_end_dt.day)
logging.debug(f"data_end_date={data_end_date}")
return data_end_date
def date_limits(prcp: pd.DataFrame) -> tuple:
logging.debug(f"{prcp.shape[0]} station*days")
station_ded = prcp >> group_by(X.station) >> summarize(dsd=X.dateto.min(), ded=X.dateto.max())
logging.debug(f"{station_ded.shape[0]} stations")
data_start_date = station_ded['dsd'].quantile(0.10)
data_end_date = station_ded['ded'].quantile(0.90)
return data_start_date, data_end_date
def df_prcp(year: int, prcp_path=None) -> pd.DataFrame:
if prcp_path is None:
prcp_path = '../../data/prcp_ruzyne'
filename = os.path.join(prcp_path, f'{year}.csv')
logging.debug(f"reading {filename}")
prcp = pd.read_csv(filename, parse_dates=['dateto']) >> arrange(X.dateto, X.station)
return prcp
def active_stations(prcp: pd.DataFrame, date_valid, config) -> pd.DataFrame:
prcp_valid = prcp >> mask(X.dateto <= date_valid)
data_end_date = ded(prcp_valid)
logging.debug(f"data_end_date={data_end_date}")
logging.debug(f"active_period_length_days={config['active_period_length_days']}")
active_start_date = data_end_date - timedelta(days=config['active_period_length_days']-1)
logging.debug(f"active_start_date={active_start_date}")
prcp_window = prcp_valid >> mask(X.dateto >= active_start_date)
prcp_active = prcp_window >> group_by(X.station) >> summarize(num_observed_days=n(X.prcp)) >> arrange(X.station)
prcp_active['is_active'] = prcp_active['num_observed_days'] >= config['active_period_min_days']
return prcp_active >> ungroup()
def transpose_to_stations(prcp_path: str, stations_path: str):
# deprecated - too slow
all_files = sorted(os.listdir(prcp_path), reverse=True)
num_files = len(all_files)
logging.debug(f"{num_files} files in {prcp_path}")
for i_file, filename in enumerate(all_files):
year = int(filename.split('.')[0])
df = df_prcp(year)
stations = df['station'].unique().sort_values()
num_stations = len(stations)
logging.debug(f"{num_stations} stations in {filename}")
for i_station, station in enumerate(stations):
df_sel = df >> mask(X.station == station) >> select(X.dateto, X.prcp)
out_filename = os.path.join(stations_path, f"{station}.csv")
if os.path.isfile(out_filename):
df_sel.to_csv(out_filename, mode='a', index=False, header=False)
else:
df_sel.to_csv(out_filename, mode='w', index=False, header=True)
logging.debug(f"file={i_file}/{num_files} station={i_station}/{num_stations} processed")
logging.debug(f"{filename} processed")
logging.debug("transpose completed")
def make_recent(data_end_date: date, config) -> pd.Series:
"""Make daily calendar with period taking values True=recent and False=preceding."""
num_days_recent = 365*config['recent_time_window_years']
num_days_preceding = 365*config['preceding_time_window_max_years']
num_days = num_days_recent + num_days_preceding
date_axis = np.flip(pd.date_range(end=data_end_date, periods=num_days, freq='D'))
calendar_values = np.concatenate([
np.ones(num_days_recent, dtype=bool),
np.zeros(num_days_preceding, dtype=bool),
])
calendar = pd.Series(calendar_values, index=date_axis)
logging.debug((
f"calendar with {num_days} days from {date_axis[-1]} to {date_axis[0]} "
f"with recent period of {num_days_recent} from {date_axis[num_days_recent-1]}"
))
return calendar
def update_drought(df_running: pd.DataFrame, df_update: pd.DataFrame, calendar: pd.Series) -> pd.DataFrame:
"""Update drought statistics with time series from a new time period."""
if df_update.shape[0] > 0:
assert "station" in df_running.columns
assert "station" in df_update.columns
assert "dateto" in df_update.columns
running_columns = [
'recent_time_window_days',
'recent_days_observed',
'recent_fill_rate',
'recent_precipitation_mm',
'recent_precipitation_annual_mean',
'preceding_time_window_days',
'preceding_days_observed',
'preceding_fill_rate',
'preceding_precipitation_mm',
'preceding_precipitation_annual_mean',
]
for column in running_columns:
if column not in df_running.columns:
df_running[column] = 0
d1, d2 = date_limits(df_update)
logging.debug(f"date_limits: {d1} and {d2}")
calendar_recent = pd.DataFrame({'dateto': calendar[calendar].index})
recent_start_date = calendar_recent.iat[-1, 0]
recent_end_date = calendar_recent.iat[0, 0]
calendar_preceding = pd.DataFrame({'dateto': calendar[~calendar].index})
preceding_start_date = calendar_preceding.iat[-1, 0]
preceding_end_date = calendar_preceding.iat[0, 0]
d1_recent = max(d1, recent_start_date)
d2_recent = min(d2, recent_end_date)
recent_delta_days = max((d2_recent - d1_recent).days + 1, 0)
logging.debug(f"recent_delta_days={recent_delta_days}")
d1_preceding = max(d1, preceding_start_date)
d2_preceding = min(d2, preceding_end_date)
preceding_delta_days = max((d2_preceding - d1_preceding).days + 1, 0)
logging.debug(f"preceding_delta_days={preceding_delta_days}")
if (recent_delta_days > 0) or (preceding_delta_days > 0):
logging.debug("proceeding")
df_base = df_running[['station']].copy()
df_update_recent = calendar_recent >> \
left_join(df_update, by='dateto') >> \
group_by(X.station) >> \
summarize(
recent_days_observed=n(X.prcp),
recent_precipitation_mm=X.prcp.sum()/10,
)
if df_update_recent.shape[0] == 0: # df_update does not intersect recent window
df_update_recent = df_base.copy()
df_update_recent['recent_days_observed'] = 0
df_update_recent['recent_precipitation_mm'] = 0.0
# logging.debug(df_update_recent.head())
df_update_preceding = calendar_preceding >> \
left_join(df_update, by='dateto') >> \
group_by(X.station) >> \
summarize(
preceding_days_observed=n(X.prcp),
preceding_precipitation_mm=X.prcp.sum()/10
)
if df_update_preceding.shape[0] == 0: # df_update does not intersect preceding window
df_update_preceding = df_base.copy()
df_update_preceding['preceding_days_observed'] = 0
df_update_preceding['preceding_precipitation_mm'] = 0.0
# logging.debug(df_update_preceding.head())
df_delta = df_base.copy() >> \
left_join(df_update_recent, by='station') >> \
left_join(df_update_preceding, by='station')
df_delta.fillna(value=0, inplace=True)
assert df_delta.shape[0] == df_running.shape[0]
recent_time_window_days = df_running.recent_time_window_days + recent_delta_days
preceding_time_window_days = df_running.preceding_time_window_days + preceding_delta_days
recent_days_observed = df_running.recent_days_observed + df_delta.recent_days_observed
preceding_days_observed = df_running.preceding_days_observed + df_delta.preceding_days_observed
recent_fill_rate = recent_days_observed / recent_time_window_days
preceding_fill_rate = preceding_days_observed / preceding_time_window_days
recent_precipitation_mm = df_running.ecent_precipitation_mm + df_delta.recent_precipitation_mm
preceding_precipitation_mm = df_running.preceding_precipitation_mm + df_delta.preceding_precipitation_mm
recent_precipitation_annual_mean = recent_precipitation_mm / recent_days_observed * 365
preceding_prcp_annual_mean = preceding_precipitation_mm / preceding_days_observed * 365
df_running['recent_time_window_days'] = recent_time_window_days
df_running['recent_days_observed'] = recent_days_observed
df_running['recent_fill_rate'] = recent_fill_rate
df_running['recent_precipitation_mm'] = recent_precipitation_mm
df_running['recent_precipitation_annual_mean'] = recent_precipitation_annual_mean
df_running['preceding_time_window_days'] = preceding_time_window_days
df_running['preceding_days_observed'] = preceding_days_observed
df_running['preceding_fill_rate'] = preceding_fill_rate
df_running['preceding_precipitation_mm'] = preceding_precipitation_mm
df_running['preceding_precipitation_annual_mean'] = preceding_prcp_annual_mean
df_running['dq_flag'] = (recent_fill_rate >= 0.90) & (preceding_fill_rate >= 0.80)
df_running['drought_index'] = 100*(1 - recent_precipitation_annual_mean / preceding_prcp_annual_mean)
else:
logging.debug("skipping")
else:
logging.debug("df_running is empty")
return df_running
def get_current_year() -> int:
y0 = date.today().year
return y0
def get_oldest_year() -> int:
current_year = get_current_year()
config = get_config()
oldest_year = current_year - \
config['drought_window_years'] - \
config['recent_time_window_years'] - \
config['preceding_time_window_min_years']
return oldest_year
def calculate_drought(
stations: pd.DataFrame,
data_end_date: date,
prcp_path: str,
out_path: str,
) -> pd.DataFrame:
logging.info(f"{stations.shape[0]} active stations with data_end_date={data_end_date}")
config = get_config()
calendar = make_recent(data_end_date, config)
year_to = calendar.index[0].year
year_from = calendar.index[-1].year
years = range(year_to, year_from - 1, -1)
logging.info(f"processing {len(years)} years from {year_to} back to {year_from}")
for year in years:
logging.info(f"year={year}")
prcp_year = df_prcp(year, prcp_path)
stations = update_drought(stations, prcp_year, calendar)
logging.info(f"{stations['dq_flag'].sum()} data quality passed")
stations.to_csv(f'{out_path}/{data_end_date.isoformat()[:10]}.csv', index=False)
logging.debug(f"\n{stations.head(10)}")
aquarius = stations >> mask(X.dq_flag) >> \
summarize(
min=X.drought_index.min(),
p25=X.drought_index.quantile(0.25),
p50=X.drought_index.quantile(0.50),
p75=X.drought_index.quantile(0.75),
max=X.drought_index.max(),
)
return aquarius
def load_countries() -> pd.DataFrame:
countries_file = '../../data/station/ghcnd-countries-continent.txt'
cdf_list = []
with open(countries_file, 'r') as file:
for line in file:
country_code = line[:2]
continent_code = line[3:5]
country_name = line[6:].rstrip()
cdf_row = (country_code, continent_code, country_name)
cdf_list.append(cdf_row)
logging.debug(f"{len(cdf_list)} countries parsed")
cdf = pd.DataFrame(cdf_list, columns=['country_code', 'continent_code', 'country_name'])
continent = {
'EU': 'Europe',
'AS': 'Asia',
'AF': 'Africa',
'NA': 'North America',
'SA': 'South America',
'OC': 'Oceania',
'AN': 'Antarctica',
}
cdf['continent_name'] = cdf['continent_code'].apply(lambda x: continent[x])
return cdf
def load_stations() -> pd.DataFrame:
stations_file = '../../data/station/ghcnd-stations.txt'
stations_list = []
with open(stations_file, 'r') as file:
for line in file:
country_code = line[:2]
station = line[:11]
latitude = float(line[12:20])
longitude = float(line[21:30])
elevation = float(line[31:37])
station_name = line[41:71].rstrip().lower()
stations_row = (station, country_code, latitude, longitude, elevation, station_name)
stations_list.append(stations_row)
logging.debug(f"{len(stations_list)} stations parsed")
colnames = ['station', 'country_code', 'latitude', 'longitude', 'elevation', 'station_name']
sdfbase = pd.DataFrame(stations_list, columns=colnames)
cdf = load_countries()
sdf = sdfbase.merge(cdf, how='left', on='country_code').set_index('station')
return sdf
def load_country_continent() -> pd.DataFrame:
cc_file = '../../data/station/country-and-continent-codes-list-csv_csv.txt'
ccdf = pd.read_csv(cc_file, sep=",")
return ccdf
def chunker(seq, size):
# from http://stackoverflow.com/a/434328
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
def insert_with_progress(df, engine, table_name: str, chunksize=None, reset_index=True):
if reset_index:
dfi = df.reset_index()
else:
dfi = df
if chunksize is None:
chunksize = int(len(dfi) / 10) # 10%
with tqdm(total=len(dfi)) as pbar:
for i, cdf in enumerate(chunker(dfi, chunksize)):
cdf.to_sql(con=engine, name=table_name, if_exists="append", index=False)
pbar.update(chunksize)
def extract_one_prcp_to_sql(filename: str, by_year_path: str, engine, table_name: str):
keys = ['station', 'dateto']
df = df_file(filename, by_year_path)
logging.debug(f"dateframe {df.shape} loaded")
df_sel = df >> mask(X.element == 'PRCP') >> transmute(station=X.station, dateto=X.dateto, prcp_mm=X.value / 10)
logging.debug(f"prcp data {df_sel.shape} extracted")
dmin, dmax = (df_sel['dateto'].min(), df_sel['dateto'].max())
df_sorted = df_sel.set_index(keys)
sql_mirror = (
"select station, dateto\n"
f"from {table_name}\n"
f"where dateto between '{dmin}' and '{dmax}'\n"
"order by station, dateto"
)
df_mirror = pd.DataFrame(engine.execute(sql_mirror).fetchall(), columns=keys).set_index(keys)
df_mirror['indb'] = True
logging.debug(f"mirror data {df_mirror.shape} extracted")
if df_mirror.shape[0] == 0:
df_joined = df_sorted
df_joined['indb'] = False
else:
df_joined = df_sorted.join(df_mirror, on=keys, how='left', sort=True)
df_joined['indb'] = df_joined['indb'].fillna(False)
if (~df_joined['indb']).sum() > 0:
df_filtered = df_joined >> mask(~X.indb)
df_increment = df_filtered >> select(X.prcp_mm)
logging.debug(f"sql insert to {table_name} in progress")
insert_with_progress(df_increment, engine, table_name, chunksize=100)
logging.debug(f"insert to {table_name} completed")
else:
logging.debug("increment is empty")
def extract_all_prcp_to_sql(by_year_path: str, engine, table_name: str):
files = sorted(os.listdir(by_year_path))
nfiles = len(files)
for i, filename in enumerate(files):
logging.debug(f"{i + 1}/{nfiles} {filename}")
extract_one_prcp_to_sql(filename, by_year_path, engine, table_name)
logging.debug("extract completed")
def find_topk_nearest(k: int, station, index, x1, sortindex1, x2, sortindex2) -> List[dict]:
nst = len(index) # total number of stations
point = (station.latitude, station.longitude)
i1 = np.where(x1[sortindex1] == point[0])[0][0]
i2 = np.where(x2[sortindex2] == point[1])[0][0]
n1 = 100 # intial perimeter, expert guess, works on ruzyne
n2 = 100 # intial perimeter, expert guess, works on ruzyne
inperim = np.zeros(nst, dtype=bool)
ninp = 1
while ninp < k + 1:
i1lb = max(i1 - n1, 0)
i1ub = min(i1 + n1, nst - 1)
x1lb, x1ub = (x1[sortindex1][i1lb], x1[sortindex1][i1ub])
i2lb = max(i2 - n2, 0)
i2ub = min(i2 + n2, nst - 1)
x2lb, x2ub = (x2[sortindex2][i2lb], x2[sortindex2][i2ub])
inperim = (x1 >= x1lb) & (x1 <= x1ub) & (x2 >= x2lb) & (x2 <= x2ub)
ninp = np.sum(inperim)
n1 *= 2
n2 *= 2
distvec = np.array([geodesic(point, station_point).km for station_point in zip(x1[inperim], x2[inperim])])
indout = np.argsort(distvec)[1:k + 1]
result = [{'station': stid, 'dist_km': disti} for stid, disti in zip(index[indout], distvec[indout])]
return result
def find_nearest_stations(stations: pd.DataFrame) -> Dict[str, List]:
topk = 3
x1 = stations['latitude'].values
x2 = stations['longitude'].values
sortindex1 = np.argsort(x1)
sortindex2 = np.argsort(x2)
result = {}
for station in tqdm(stations.itertuples(), total=len(stations)):
topn_list = find_topk_nearest(
k=topk,
station=station,
index=stations.index,
x1=x1,
sortindex1=sortindex1,
x2=x2,
sortindex2=sortindex2)
result[station.Index] = topn_list
return result
def get_nearest_stations() -> Dict[str, list]:
with open('../../data/station/nearest_stations.json', 'r') as file:
nearest = json.load(file)
return nearest
def df_station(station: str, engine=None) -> pd.DataFrame:
if engine is None:
engine = create_engine('postgresql://postgres:@localhost/ghcn')
q = engine.execute(f"select * from prcp where station='{station}' order by dateto").fetchall()
df = pd.DataFrame(q, columns=['station', 'dateto', 'prcp_mm'])
return df.set_index(['station', 'dateto'])
def make_day_index(year: int) -> pd.DataFrame:
"""
Make calendar with day index where 0=last day of the previous year, 1=first day of the year.
It spans the current year and two previous years, so the range is -730 to +365,
which is 1095 days for one station and year.
"""
start_date = date(year-2, 1, 1)
end_date = date(year, 12, 31)
zero_date = datetime(year-1, 12, 31)
date_axis = pd.date_range(start=start_date, end=end_date, freq='D')
day_index = (date_axis - zero_date).days
calendar = pd.DataFrame({
'year': year,
'dateto': [date(d.year, d.month, d.day) for d in date_axis],
'day_index': day_index,
}, columns=['year', 'dateto', 'day_index'])
# logging.debug(f"calendar with {len(date_axis)} days from {date_axis[0]} to {date_axis[-1]}")
return calendar
def calc_reference_station_year(prcp: pd.DataFrame, year: int) -> pd.DataFrame:
keys = ['station', 'dateto']
day_index = make_day_index(year)
day_index['station'] = prcp.index[0][0]
day_index = day_index.set_index(keys)
ref = day_index.join(prcp)
ref['cum_prcp'] = np.nancumsum(ref['prcp_mm'].astype(float))
day_observed = ref['prcp_mm'].notnull()
cum_days_observed = np.cumsum(day_observed)
cum_days_available = np.arange(1, len(ref)+1)
ref['cum_fillrate'] = cum_days_observed / cum_days_available
ref['reference_prcp'] = ref['cum_prcp'] / ref['cum_fillrate']
# ref.at[ref['cum_fillrate'] < 0.8, 'reference_prcp'] = np.nan
return ref
def calc_reference_station(prcp: pd.DataFrame) -> pd.DataFrame:
years = np.arange(1981, 2010+1)
ref_list = []
for year in years:
ref_year = calc_reference_station_year(prcp, year)
ref_list.append(ref_year)
ref = pd.concat(ref_list, axis=0)
return ref
def reference_quantiles(reference: pd.DataFrame) -> pd.DataFrame:
qq = np.array([0.00, 0.25, 0.50, 0.75, 1.00])
cdf_prcp = ECDF()
cdf_fill = ECDF()
qlist = []
keys = ['station', 'day_index']
for gkeys, gref in reference.groupby(keys):
if gref.empty or gref['reference_prcp'].notnull().sum() == 0:
qprcp = np.full(5, np.nan)
qfill = np.full(5, np.nan)
else:
cdf_prcp.fit(gref['reference_prcp'], weights=gref['cum_fillrate'])
qprcp = cdf_prcp.quantile(qq)
cdf_fill.fit(gref['cum_fillrate'])
qfill = cdf_fill.quantile(qq)
row = (*gkeys, *qprcp, *qfill)
qlist.append(row)
cols = [
*keys,
'prcp_min',
'prcp_p25',
'prcp_p50',
'prcp_p75',
'prcp_max',
'fill_min',
'fill_p25',
'fill_p50',
'fill_p75',
'fill_max',
]
qdf = pd.DataFrame(qlist, columns=cols).set_index(keys)
return qdf
def calc_reference_quantiles(prcp: pd.DataFrame) -> pd.DataFrame:
"""Composition of calc_reference_station and reference_quantiles."""
# This makes sure that we do not use the reference dataset directly, just the quantiles
ref = calc_reference_station(prcp)
q = reference_quantiles(ref)
return q
def load_reference_quantiles(station: str, engine) -> pd.DataFrame:
"""Load reference quantiles from database."""
q = engine.execute(f"select * from reference where station='{station}'").fetchall()
cols = [
'station',
'day_index',
'prcp_min',
'prcp_p25',
'prcp_p50',
'prcp_p75',
'prcp_max',
'fill_min',
'fill_p25',
'fill_p50',
'fill_p75',
'fill_max',
]
df = pd.DataFrame(q, columns=cols).set_index(keys=['station', 'day_index'])
return df
def calc_cumprcp(prcp: pd.DataFrame, year: int) -> pd.DataFrame:
data_end_date = prcp.index.get_level_values('dateto')[-1]
cprcp = calc_reference_station_year(prcp, year) # reuse the same code as for the reference
cprcp.columns = ['year', 'day_index', 'prcp_mm', 'cum_prcp', 'cum_fillrate', 'ytd_prcp']
idx = pd.IndexSlice
return cprcp.loc[idx[:, :data_end_date], :]
def drought_index(cum_prcp: float, reference_cum_prcp: np.ndarray) -> float:
"""Calculate drought index from the cumulative precipitation and the reference values."""
cdf = ECDF()
cdf.fit(reference_cum_prcp)
curr_cdf = cdf.eval(np.array(cum_prcp))
curr_drought_index = 2 * (0.5 - curr_cdf)
return curr_drought_index
def current_drought_rate(refq: pd.DataFrame, curr_cprcp: pd.Series) -> float:
if refq.empty:
curr_drought_rate = np.nan
else:
curr_station = refq.index[0][0]
curr_day_index = curr_cprcp['day_index']
curr_ytd_prcp = curr_cprcp['ytd_prcp']
refq_columns = ['prcp_min', 'prcp_p25', 'prcp_p50', 'prcp_p75', 'prcp_max']
refq_prcp = refq.loc[(curr_station, curr_day_index), refq_columns].values
if len(refq_prcp) > 0:
curr_drought_rate = drought_index(curr_ytd_prcp, refq_prcp.flatten())
else:
curr_drought_rate = np.nan
return curr_drought_rate
def current_fillrate_cdf(refq: pd.DataFrame, curr_cprcp: pd.Series) -> float:
curr_station = refq.index[0][0]
curr_day_index = curr_cprcp['day_index']
curr_fillrate = curr_cprcp['cum_fillrate']
refq_columns = ['fill_min', 'fill_p25', 'fill_p50', 'fill_p75', 'fill_max']
ref_fillrate = refq.loc[(curr_station, curr_day_index), refq_columns].values
if len(ref_fillrate) > 0:
cdf = ECDF()
cdf.fit(ref_fillrate.flatten())
curr_fillrate_cdf = cdf.eval(curr_fillrate)
else:
curr_fillrate_cdf = np.nan
return curr_fillrate_cdf
def station_label(station: pd.Series) -> str:
coords = f"{station.latitude:.3f}, {station.longitude:.3f}, {station.elevation:.0f}"
stlabel = f"{station.continent_name}/{station.country_name}/{station.station_name} ({coords})"
return stlabel
def nice_ylim(y: float) -> float:
"""Guess the ylim which is proportional to the value."""
step = 10.0 ** np.round(np.log10(0.1*y))
ub = step * np.ceil(y / step)
return ub
def cum_prcp_plot_matplotlib(
stlabel: str,
rdf: pd.DataFrame,
cprcp: pd.DataFrame,
curr_drought_rate: float
):
"""
Plot cumulative precipitation with Matplotlib. Deprecated in favor of cum_prcp_plot.
:param stlabel:
:param rdf:
:param cprcp:
:param curr_drought_rate:
:return:
"""
f = plt.figure(figsize=(12, 12))
if not rdf.empty and rdf['prcp_min'].notnull().sum() > 0:
prcp_ub = nice_ylim(rdf['prcp_max'].iloc[-1])
xx = rdf['dateto']
plt.fill_between(x=xx, y1=0, y2=rdf['prcp_min'], color='red', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_min'], y2=rdf['prcp_p25'], color='orange', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_p25'], y2=rdf['prcp_p75'], color='green', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_p75'], y2=rdf['prcp_max'], color='cyan', linewidth=0.0, alpha=0.5)
plt.fill_between(x=xx, y1=rdf['prcp_max'], y2=prcp_ub, color='blue', linewidth=0.0, alpha=0.5)
plt.plot(xx, rdf['prcp_p50'], c='grey')
if not cprcp.empty:
plt.plot(cprcp.index.get_level_values('dateto'), cprcp['ytd_prcp'], c='red', linewidth=3)
ax = plt.gca()
ax.set_title(f"{stlabel}: current drought rate is {100 * curr_drought_rate:.0f}%")
ax.set_ylabel('3rd year cumulative precipitation in mm')
ax.grid(True)
return f
def cum_prcp_plot(
stlabel: str,
rdf: pd.DataFrame,
cprcp: pd.DataFrame,
curr_drought_rate: float
):
"""
Plot cumulative precipitation with Bokeh.
:param stlabel:
:param rdf:
:param cprcp:
:param curr_drought_rate:
:return:
"""
src_ref = ColumnDataSource(rdf)
src_cur = ColumnDataSource(cprcp.reset_index())
p = figure(
plot_width=800,
plot_height=800,
title=f"{stlabel}: current drought index is {100 * curr_drought_rate:.0f}%",
y_axis_label="3rd year cumulative precipitation in mm",
x_axis_type='datetime',
)
if not rdf.empty and rdf['prcp_min'].notnull().sum() > 0:
prcp_ub = nice_ylim(rdf['prcp_max'].iloc[-1])
amin = VArea(x="dateto", y1=0, y2="prcp_min", fill_color="red", fill_alpha=0.5)
ap25 = VArea(x="dateto", y1="prcp_min", y2="prcp_p25", fill_color="orange", fill_alpha=0.5)
ap50 = VArea(x="dateto", y1="prcp_p25", y2="prcp_p75", fill_color="green", fill_alpha=0.5)
ap75 = VArea(x="dateto", y1="prcp_p75", y2="prcp_max", fill_color="cyan", fill_alpha=0.5)
amax = VArea(x="dateto", y1="prcp_max", y2=prcp_ub, fill_color="blue", fill_alpha=0.5)
lp50 = Line(x="dateto", y="prcp_p50", line_color='grey', line_width=3)
p.add_glyph(src_ref, amin)
p.add_glyph(src_ref, ap25)
p.add_glyph(src_ref, ap50)
p.add_glyph(src_ref, ap75)
p.add_glyph(src_ref, amax)
rref = p.add_glyph(src_ref, lp50)
ttp_ref = [
("Date", "@dateto{%F}"),
("Day Index", "@day_index"),
("Precipitation min", "@prcp_min{0.}"),
("Precipitation p25", "@prcp_p25{0.}"),
("Precipitation p50", "@prcp_p50{0.}"),
("Precipitation p75", "@prcp_p75{0.}"),
("Precipitation max", "@prcp_max{0.}"),
]
hover_ref = HoverTool(renderers=[rref], tooltips=ttp_ref, formatters={"@dateto": "datetime"})
p.add_tools(hover_ref)
if not cprcp.empty:
lcur = Line(x='dateto', y='ytd_prcp', line_color='red', line_width=3)
rcur = p.add_glyph(src_cur, lcur)
ttp_cur = [
("Date", "@dateto{%F}"),
("Day Index", "@day_index"),
("Precipitation that day (mm)", "@prcp_mm{0.}"),
("Precipitation 3rd year cumulative observed (mm)", "@cum_prcp{0.}"),
("Fill rate 3rd year cumulative", "@cum_fillrate{0.000}"),
("Precipitation 3rd year cumulative predicted (mm)", "@ytd_prcp{0.}"),
]
hover_cur = HoverTool(renderers=[rcur], tooltips=ttp_cur, formatters={"@dateto": "datetime"})
p.add_tools(hover_cur)
return p
def cum_fillrate_plot(
stlabel: str,
rdf: pd.DataFrame,
cprcp: pd.DataFrame,
curr_fillrate: float,
curr_fillrate_cdf: float,
):
f = plt.figure(figsize=(16, 9))
if not cprcp.empty:
plt.plot(cprcp.index.get_level_values('dateto'), cprcp['cum_fillrate'], c='red', linewidth=3)
if not rdf.empty:
plt.fill_between(rdf['dateto'], y1=rdf['fill_min'], y2=rdf['fill_max'], color='lightgray', alpha=0.5)
plt.fill_between(rdf['dateto'], y1=rdf['fill_p25'], y2=rdf['fill_p75'], color='darkgray', alpha=0.5)
plt.plot(rdf['dateto'], rdf['fill_p50'], color='gray')
ax = plt.gca()
ax.set_ylim(0, 1)
title = f"{stlabel}: current fill rate is {curr_fillrate:.2f} which is {100 * curr_fillrate_cdf:.0f} percentile"
ax.set_title(title)
ax.set_ylabel('fill rate')
ax.grid(True)
return f
def totals_barchart_matplotlib(dfy: pd.DataFrame):
"""Deprecated in favor of totals_barchart."""
f = plt.figure(figsize=(12, 12))
ax = plt.gca()
ax.set_ylabel("annual precipitation in mm")
ax.set_title(f"Yearly precipitation totals")
if not dfy.empty:
xx = dfy['year'].values
yy = dfy['prcp_mm'].values / 10
dd = dfy['observed_days']
x1 = np.min(xx)
x2 = np.max(xx)
mx = np.mean(xx)
my = np.mean(yy)
plt.bar(xx, yy, width=0.8)
plt.step(xx, dd, c='red')
plt.plot([x1, x2], [my, my], color='blue')
ax.annotate(
f"{my:.0f}",
xy=(mx, my),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center',
va='bottom',
fontsize='x-large',
color='blue',
)
return f
def totals_barchart(df: pd.DataFrame):
dfy = df.copy()
dfy['prcp_mm'] = df.prcp_mm / 10
dfy['available_days'] = 365 + (dfy.year % 4 == 0)
dfy['fill_rate'] = dfy.observed_days / dfy.available_days
dfy['prcp_pred'] = dfy.prcp_mm / dfy.fill_rate
prcp_pred_mean = dfy.prcp_pred.mean()
dfy['prcp_pred_mean'] = prcp_pred_mean
f = figure(
plot_width=800,
plot_height=800,
title=f"Yearly precipitation totals, mean={prcp_pred_mean:.0f}mm",
y_axis_label="annual precipitation in mm",
)
if not dfy.empty:
src = ColumnDataSource(dfy)
bobs = VBar(x='year', top='prcp_mm', fill_color='blue', line_color='blue', width=0.8)
bpre = VBar(x='year', bottom='prcp_mm', top='prcp_pred', fill_color='lightblue', line_color='blue', width=0.8)
lpre = Line(x='year', y='prcp_pred_mean', line_color='darkblue', line_width=3)
f.add_glyph(src, bobs)
f.add_glyph(src, bpre)
f.add_glyph(src, lpre)
ttp = [
("Year", "@year"),
("Precipitation observed (mm)", "@prcp_mm{0.}"),
("Observed days", "@observed_days"),
("Available days", "@available_days"),
("Fill rate", "@fill_rate{0.000}"),
("Precipitation predicted (mm)", "@prcp_pred{0.}"),
]
hover_tool = HoverTool(tooltips=ttp)
f.add_tools(hover_tool)
return f
def drought_rate_data(stid: str, year: int, engine=None) -> tuple:
prcp = df_station(stid, engine)
if not prcp.empty:
if engine is None:
refq = calc_reference_quantiles(prcp)
else:
refq = load_reference_quantiles(stid, engine)
data_end_date = prcp.index.get_level_values('dateto')[-1]
day_index = make_day_index(year)
rdf = day_index.merge(refq, on='day_index', how='left') >> mask(X.dateto <= data_end_date)
if engine is None:
cprcp = calc_cumprcp(prcp, year)
else:
cprcp = calc_cumprcp(prcp, year) # TODO load from db
if not cprcp.empty:
curr_cprcp = cprcp.iloc[-1, :]
curr_fillrate = curr_cprcp['cum_fillrate']
if refq.empty:
curr_drought_rate = np.nan
curr_fillrate_cdf = np.nan
else:
curr_drought_rate = current_drought_rate(refq, curr_cprcp)
curr_fillrate_cdf = current_fillrate_cdf(refq, curr_cprcp)
else:
curr_drought_rate = np.nan
curr_fillrate = np.nan
curr_fillrate_cdf = np.nan
else:
rdf = pd.DataFrame()
cprcp = pd.DataFrame()
curr_drought_rate = np.nan
curr_fillrate = np.nan
curr_fillrate_cdf = np.nan
return rdf, cprcp, curr_drought_rate, curr_fillrate, curr_fillrate_cdf
def sql_engine():
engine = create_engine('postgres://postgres:@localhost/ghcn')
return engine
def get_stations_noref(engine, stations: pd.DataFrame) -> pd.DataFrame:
cols = ['station', 'dispatched_at', 'completed_at']
sql_noref = (
f"select {', '.join(cols)}\n"
"from reference_job\n"
"where completed_at is null"
)
station_noref = pd.DataFrame(engine.execute(sql_noref).fetchall(), columns=cols).set_index('station')
station_coord = station_noref.join(stations)
lat = station_coord.latitude
lon = station_coord.longitude
center_point = (49.9629345, 14.0600897) # x=14.0600897&y=49.9629345 = <NAME> 1005
station_coord['perimeter_km'] = np.array([geodesic(center_point, station_point).km for station_point in zip(lat, lon)])
return station_coord.sort_values(by='perimeter_km')
def ded_prcp(engine) -> date:
sql_query = (
"select max(dateto) as ded\n"
"from prcp"
)
df = pd.DataFrame(engine.execute(sql_query).fetchall(), columns=['ded'])
data_end_dt = df['ded'].iat[0]
data_end_date = date(data_end_dt.year, data_end_dt.month, data_end_dt.day)
logging.debug(f"data_end_date={data_end_date}")
return data_end_date
def ded_cump(engine, year: int) -> tuple:
sql_query = (
"select max(dateto) as ded, max(day_index) as dei\n"
"from cumprcp\n"
f"where year={year}"
)
df = pd.DataFrame(engine.execute(sql_query).fetchall(), columns=['ded', 'dei'])
data_end_dt = df['ded'].iat[0]
data_end_index = df['dei'].iat[0]
if data_end_dt is None:
data_end_date = None
else:
data_end_date = date(data_end_dt.year, data_end_dt.month, data_end_dt.day)
logging.debug(f"cump_end_date={data_end_date}")
return data_end_date, data_end_index
def prcp_dateto(engine, dateto: date) -> pd.DataFrame:
"""Select all rows from prcp table as of dateto."""
sql_query = f"select station, cast(prcp_mm as float) as prcp_mm from prcp where dateto=date'{dateto.isoformat()}'"
logging.debug(sql_query)
df = pd.DataFrame(engine.execute(sql_query).fetchall(), columns=['station', 'prcp_mm'])
return df
def increment_cumprcp(engine, year: int, day_index: int, dateto: date, cum_days_available: int):
"""Insert new records to cumprcp for the spacified day assuming that the previous day is there."""
cols_previous = ['station', 'year', 'day_index', 'dateto', 'cum_days_observed', 'cum_prcp']
sql_previous = f"select {', '.join(cols_previous)} from cumprcp where year={year} and day_index={day_index - 1}"
cumprcp_previous = pd.DataFrame(engine.execute(sql_previous).fetchall(), columns=cols_previous)
assert not cumprcp_previous.empty
prcp = prcp_dateto(engine, dateto)
cols_both = ['station', 'year']
cumprcp = cumprcp_previous[cols_both].merge(prcp, how='left', on='station')
cumprcp['day_index'] = day_index
cumprcp['dateto'] = dateto
cumprcp['flag_observed'] = cumprcp.prcp_mm.notnull()
cumprcp['cum_days_observed'] = cumprcp_previous.cum_days_observed + cumprcp.flag_observed
cumprcp['cum_fillrate'] = cumprcp.cum_days_observed / cum_days_available
cumprcp['cum_prcp'] = cumprcp_previous.cum_prcp + cumprcp.prcp_mm.fillna(0)
cumprcp['cum_prcp_pred'] = cumprcp.cum_prcp / cumprcp.cum_fillrate
cols_out = [
'station',
'year',
'day_index',
'dateto',
'flag_observed',
'cum_days_observed',
'cum_fillrate',
'cum_prcp',
'cum_prcp_pred',
]
insert_with_progress(cumprcp[cols_out], engine, table_name='cumprcp', reset_index=False)
def update_cumprcp(engine):
"""Update table cumprcp if new data is available in prcp table."""
stations = load_stations()
prcp_end_date = ded_prcp(engine)
year = prcp_end_date.year
day_index = make_day_index(year)
first_day_index = day_index['day_index'].iat[0]
cump_end_date, cump_end_index = ded_cump(engine, year)
if cump_end_date is None: # create new year skeleton
dateto0 = day_index['dateto'].iat[0]
logging.debug(dateto0)
prcp0 = prcp_dateto(engine, dateto0)
cump0 = stations >> left_join(prcp0, by='station')
flag_observed = cump0.prcp_mm.notnull()
skeleton = pd.DataFrame({
'station': stations.index,
'year': year,
'day_index': first_day_index,
'dateto': dateto0,
'flag_observed': flag_observed,
'cum_days_observed': flag_observed.astype(int),
'cum_fillrate': flag_observed.astype(float),
'cum_prcp': cump0.prcp_mm.fillna(0),
'cum_prcp_pred': cump0.prcp_mm,
})
insert_with_progress(skeleton, engine, table_name='cumprcp', reset_index=False)
cump_end_date = dateto0
day_index_todo = day_index.loc[(day_index.dateto > cump_end_date) & (day_index.dateto <= prcp_end_date), :]
for x in day_index_todo.itertuples():
logging.debug(x)
cum_days_available = x.day_index - first_day_index + 1
increment_cumprcp(engine, x.year, x.day_index, x.dateto, cum_days_available)
logging.debug("completed")
def do_worker_job(engine, station_id: str):
if station_id:
prcp = df_station(station_id)
if not prcp.empty:
refq = calc_reference_quantiles(prcp)
if not refq.empty:
insert_with_progress(refq, engine, table_name='reference', chunksize=2000)
return
def make_station_tree(stations: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""All nodes of the stations tree are searcheable in the autocomplete by node_name."""
def node_name_station(station_record: NamedTuple) -> str:
name = f"{station_record.station_name} (station in {station_record.country_name})"
return name
def node_name_country(station_record: NamedTuple) -> str:
name = f"{station_record.country_name} (country in {station_record.continent_name})"
return name
station_name = pd.Series([node_name_station(x) for x in stations.itertuples()])
if station_name.is_unique:
logging.debug("tree nodes are unique")
else:
logging.error("tree nodes are not unique")
freq = station_name.value_counts()
ndup = np.sum(freq > 1)
logging.debug(f"{ndup} duplicated names")
for index, value in tqdm(freq[:ndup].iteritems(), total=ndup):
# logging.debug(f"{index}: {value}x")
# deduplication - add i/n at the end of each name
dupidx = np.flatnonzero(station_name == index)
for i, (idx, ndname) in enumerate(station_name.iloc[dupidx].iteritems()):
# logging.debug(f"{idx}: {ndname} {i+1}/{value}")
station_name.at[idx] = f"{ndname} {i+1}/{value}"
country_name = pd.Series([node_name_country(x) for x in stations.itertuples()])
continent_name = stations['continent_name']
node_name = | pd.concat([station_name, country_name, continent_name], axis=0) | pandas.concat |
from IPython.core.error import UsageError
from mock import MagicMock
import numpy as np
from nose.tools import assert_equals, assert_is
import pandas as pd
from pandas.testing import assert_frame_equal
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from sparkmagic.utils.utils import parse_argstring_or_throw, records_to_dataframe
from sparkmagic.utils.constants import SESSION_KIND_PYSPARK
from sparkmagic.utils.dataframe_parser import (
DataframeHtmlParser,
cell_contains_dataframe,
CellComponentType,
cell_components_iter,
CellOutputHtmlParser,
)
import unittest
def test_parse_argstring_or_throw():
parse_argstring = MagicMock(side_effect=UsageError("OOGABOOGABOOGA"))
try:
parse_argstring_or_throw(
MagicMock(), MagicMock(), parse_argstring=parse_argstring
)
assert False
except BadUserDataException as e:
assert_equals(str(e), str(parse_argstring.side_effect))
parse_argstring = MagicMock(side_effect=ValueError("AN UNKNOWN ERROR HAPPENED"))
try:
parse_argstring_or_throw(
MagicMock(), MagicMock(), parse_argstring=parse_argstring
)
assert False
except ValueError as e:
assert_is(e, parse_argstring.side_effect)
def test_records_to_dataframe_missing_value_first():
result = """{"z":100, "y":50}
{"z":25, "nullv":1.0, "y":10}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, True)
expected = pd.DataFrame(
[{"z": 100, "nullv": None, "y": 50}, {"z": 25, "nullv": 1, "y": 10}],
columns=["z", "nullv", "y"],
)
assert_frame_equal(expected, df)
def test_records_to_dataframe_coercing():
result = """{"z":"100", "y":"2016-01-01"}
{"z":"25", "y":"2016-01-01"}"""
df = records_to_dataframe(result, SESSION_KIND_PYSPARK, True)
expected = pd.DataFrame(
[
{"z": 100, "y": np.datetime64("2016-01-01")},
{"z": 25, "y": np.datetime64("2016-01-01")},
],
columns=["z", "y"],
)
assert_frame_equal(expected, df)
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end, mean, std).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
- a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end, mean, std).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
# -*- coding: utf-8 -*-
# Copyright StateOfTheArt.quant.
#
# * Commercial Usage: please contact <EMAIL>
# * Non-Commercial Usage:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scipy.stats import rankdata
import torch
import numpy as np
import pandas as pd
from featurizer.functions.algebra_statistic import weighted_average, weighted_std, downside_std, upside_std
import pdb
# https://stackoverflow.com/questions/14313510/how-to-calculate-moving-average-using-numpy
def rolling_sum(tensor, window=1, dim=0):
ret = torch.cumsum(tensor, dim=dim)
ret[window:] = ret[window:] - ret[:-window]
ret[:window-1]= float("nan")
return ret
def rolling_sum_(tensor, window=1, dim=0):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).sum()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
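# Illustrative check (not part of the original module): the cumsum trick in
# `rolling_sum` should agree with the pandas-based `rolling_sum_` once the
# window is fully populated; the first `window - 1` rows are NaN in both.
def _rolling_sum_sanity_check(window: int = 3):
    x = torch.arange(12, dtype=torch.float32).reshape(-1, 1)
    fast = rolling_sum(x, window=window)
    slow = rolling_sum_(x, window=window)
    assert torch.allclose(fast[window - 1:], slow[window - 1:])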
def rolling_sum3d(tensor, window=1, dim=1):
ret = torch.cumsum(tensor, dim=dim)
ret[:,window:] = ret[:,window:] - ret[:,:-window]
ret[:,:window-1]= float("nan")
return ret
def rolling_mean(tensor, window=1):
#to-do fixme
#ret = torch.cumsum(tensor, dim=0)
#ret[window:] = ret[window:] - ret[:-window]
#ret[:window-1]= float("nan")
#output = ret/window
return rolling_mean_(tensor=tensor, window=window)
def rolling_mean_(tensor, window=1):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).mean()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_weighted_mean(tensor, window=1, halflife=90):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).apply(lambda x: weighted_average(x,halflife=halflife))
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
# https://stackoverflow.com/questions/30399534/shift-elements-in-a-numpy-array
def shift(tensor, window=1):
if window == 0:
return tensor
e = torch.empty_like(tensor, dtype=tensor.dtype, device=tensor.device)
if window > 0:
e[:window] = float("nan")
e[window:] = tensor[:-window]
else:
e[window:] = float("nan")
e[:window] = tensor[-window:]
return e
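# Illustrative example (not part of the original module): `shift` mirrors
# pandas' Series.shift semantics, padding with NaN on the side the data moved
# away from.
#
#   t = torch.tensor([1.0, 2.0, 3.0, 4.0])
#   shift(t, 1)   # -> [nan, 1., 2., 3.]
#   shift(t, -1)  # -> [2., 3., 4., nan]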
def diff(tensor, period=1):
shiftd_tensor = shift(tensor, window=period)
diff = tensor - shiftd_tensor
return diff
def pct_change(tensor, period=1):
shiftd_tensor = shift(tensor, window=period)
diff = tensor - shiftd_tensor
output = diff.div(shiftd_tensor)
return output
#https://stackoverflow.com/questions/54564253/how-to-calculate-the-cumulative-product-of-a-rolling-window-in-pandas
def rolling_prod(tensor, window):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).apply(np.prod)
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_var(tensor, window):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
output_df = tensor_df.rolling(window).var()
output_tensor = torch.tensor(output_df.values, dtype=tensor.dtype, device=tensor.device)
return output_tensor
def rolling_std(tensor, window):
tensor_np = tensor.cpu().detach().numpy()
tensor_df = pd.DataFrame(tensor_np)
import sqlite3
import pandas as pd
import numpy as np
def save_results(env, agent, history, reward, scenario=None, agent_name=None, notes=None):
conn = sqlite3.connect('gym_battery_database.db')
result = conn.execute('SELECT MAX(scenario_id) FROM grid_flow_output;')
scenario_id = int(result.fetchone()[0]) + 1
if scenario is None:
scenario = input("Enter the scenario name (i.e. the load used): ")
if agent_name is None:
agent_name = input("Enter the agent name, or y to accept {}: ".format(agent.name))
if agent_name.lower() == 'y':
agent_name = agent.name
if notes is None:
notes = input("Consider adding any notes: ")
saved_time = pd.Timestamp.now()
# Save the final grid_flow using entirely greedy policy
DF = env.grid_flow.copy()
DF['agent_state'] = [tuple(agent.discretize_space(np.array(s))) for s in DF.state]
agent_state_hash_table = {hash(s): s for s in DF.agent_state}
DF.agent_state = [hash(s) for s in DF.agent_state]
state_hash_table = {hash(s): s for s in DF.state}
DF.state = [hash(s) for s in DF.state]
DF['reward'] = reward
DF['agent'] = agent_name
DF['scenario'] = scenario
DF['episodes'] = len(history)
DF['notes'] = notes
DF['scenario_id'] = scenario_id
DF['saved_timestamp'] = saved_time
DF.to_sql('grid_flow_output', conn, if_exists='append')
# Save the state-action value estimates
val = agent.S_A_values.copy()
val = pd.DataFrame.from_dict(val, orient='index')
val = val.reset_index()
val['state'] = [[i.level_0, i.level_1, i.level_2, i.level_3] for ix, i in val.iterrows()]
val = val.rename(columns={"state": "agent_state"})
val.index = val.agent_state
val = val.drop(columns=['level_0', 'level_1', 'level_2', 'level_3', 'agent_state'])
val.index = [tuple(x) for x in val.index]
add_agent_state_hash = {hash(s): s for s in val.index if hash(s) not in agent_state_hash_table.keys()}
agent_state_hash_table.update(add_agent_state_hash)
val.index = [hash(s) for s in val.index]
val['agent'] = agent_name
val['scenario'] = scenario
val['scenario_id'] = scenario_id
val['saved_timestamp'] = saved_time
val.to_sql('state_action_values', conn, if_exists='append')
agent_state_hash_DF = pd.DataFrame.from_dict(agent_state_hash_table, orient='index',
columns=['hour', 'charge', 'load', 'demand'])
agent_state_hash_DF['saved_timestamp'] = saved_time
agent_state_hash_DF['state'] = agent_state_hash_DF.index
try:
agent_state_hash_DF = pd.read_sql('SELECT * FROM agent_states_hash;', conn).append(agent_state_hash_DF)
except:
print("Error reading in agent state hash table. Is this the first time you're running it?")
agent_state_hash_DF.drop_duplicates(subset='state', inplace=True)
agent_state_hash_DF.reset_index(drop=True, inplace=True)
agent_state_hash_DF.saved_timestamp = pd.to_datetime(agent_state_hash_DF.saved_timestamp)
# conn.execute("DROP TABLE agent_states_hash;")
try:
agent_state_hash_DF.to_sql('agent_states_hash', conn, if_exists='replace', index=False)
except:
print("returning DF")
return agent_state_hash_DF
state_hash_DF = pd.DataFrame.from_dict(state_hash_table, orient='index',
columns=['hour', 'charge', 'load', 'demand'])
state_hash_DF['saved_timestamp'] = saved_time
state_hash_DF['state'] = state_hash_DF.index
try:
state_hash_DF = pd.read_sql('SELECT * FROM states_hash;', conn).append(state_hash_DF)
except:
print("Error reading in state hash table. Is this the first time you're running it?")
state_hash_DF.drop_duplicates(subset='state', inplace=True)
state_hash_DF.reset_index(drop=True, inplace=True)
state_hash_DF.saved_timestamp = pd.to_datetime(state_hash_DF.saved_timestamp)
state_hash_DF.to_sql('states_hash', conn, if_exists='replace', index=False)
# Save the history of performance by episode
df_history = pd.DataFrame(history, columns=['episode_cnt', 'reward', 'new_demand', 'orig_reward', 'orig_demand'])
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(Timedelta(seconds=0))
expected = Series(
[
timedelta(0),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# interpreted as seconds, no longer supported
msg = "value should be a 'Timedelta', 'NaT', or array of those. Got 'int'"
with pytest.raises(TypeError, match=msg):
obj.fillna(1)
result = obj.fillna(Timedelta(seconds=1))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(timedelta(days=1, seconds=1))
expected = Series(
[
timedelta(days=1, seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(np.timedelta64(10 ** 9))
expected = Series(
[
timedelta(seconds=1),
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
]
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
result = obj.fillna(NaT)
expected = Series(
[
NaT,
timedelta(0),
timedelta(1),
timedelta(days=1, seconds=9 * 3600 + 60 + 1),
],
dtype="m8[ns]",
)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# ffill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.ffill()
expected = td.fillna(Timedelta(seconds=0))
expected[0] = np.nan
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
# bfill
td[2] = np.nan
obj = frame_or_series(td)
result = obj.bfill()
expected = td.fillna(Timedelta(seconds=0))
expected[2] = timedelta(days=1, seconds=9 * 3600 + 60 + 1)
expected = frame_or_series(expected)
tm.assert_equal(result, expected)
def test_datetime64_fillna(self):
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
ser[2] = np.nan
# ffill
result = ser.ffill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
# bfill
result = ser.bfill()
expected = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130103 9:01:01"),
Timestamp("20130103 9:01:01"),
]
)
tm.assert_series_equal(result, expected)
def test_datetime64_fillna_backfill(self):
# GH#6587
# make sure that we are treating as integer when filling
msg = "containing strings is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
# this also tests inference of a datetime-like with NaT's
ser = Series([NaT, NaT, "2013-08-05 15:30:00.000001"])
expected = Series(
[
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
"2013-08-05 15:30:00.000001",
],
dtype="M8[ns]",
)
result = ser.fillna(method="backfill")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
def test_datetime64_tz_fillna(self, tz):
# DatetimeLikeBlock
ser = Series(
[
Timestamp("2011-01-01 10:00"),
NaT,
Timestamp("2011-01-03 10:00"),
NaT,
]
)
null_loc = Series([False, True, False, True])
# county level symptoms map for Sweden
import csv
import json
import os
import pandas as pd
import plotly.express as px
import requests
base_path = os.getenv("PYTHONPATH", ".")
# map
with open(f"{base_path}/sweden-counties.geojson", "r") as sw:
jdata = json.load(sw)
# dictionary to match data and map
counties_id_map = {}
for feature in jdata["features"]:
feature["id"] = feature["properties"]["cartodb_id"]
counties_id_map[feature["properties"]["name"]] = feature["id"]
# data
req = requests.get(
"https://blobserver.dckube.scilifelab.se/blob/CSSS_estimates_mostrecent.csv"
)
reader = csv.reader(req.text.splitlines())
data = list(reader)[-21:]
df1 = pd.DataFrame(
data[0:], columns=["Lan", "Datum", "Uppskattning", "Low_CI", "High_CI"]
)
# format data
df1["Datum"] = pd.to_datetime(df1["Datum"])
df1.sort_values(by="Datum", ascending=False, inplace=True)
df1.drop_duplicates("Lan", keep="first", inplace=True)
df1["Uppskattning"] = | pd.to_numeric(df1["Uppskattning"], errors="coerce") | pandas.to_numeric |
# Copyright 2018 BBVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import awscosts
import pandas as pd
import datetime
import numpy as np
def simulate(df: pd.DataFrame, monthly_scale_factor=None):
""" Builds a synthetic month of requests using an input DataFrame
Using a dataframe with a date index, collapses the whole dataframe to build
a synthetic month. The original dataframe can have an arbitrary number of
rows. The more rows, the longer timespan (i.e. months, years...) to compute
average values per hour in a month. The resulting dataframe has a column
with the cumulative sum of the previous rows.
Args:
df (pandas.DataFrame): Dataframe (datetime index) with requests in a
given period. Needs a column called 'hits'.
monthly_scale_factor (int): factor to multiply to the normalized
requests values in each row of the requests DataFrame. Normally
it's the total number of requests in a month.
Returns:
Synthetic 30-day DataFrame (1 hour per row) with requests
"""
# prepare DF fields
df['hits'] = df['hits'].astype(float)
df['weekday'] = df.index.weekday_name
df['hour'] = df.index.hour
startdate = datetime.datetime(2018, 1, 7)
days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday',
'Friday', 'Saturday', 'Sunday')
week_df = pd.DataFrame()
for day in days:
aux_df = pd.DataFrame(
index=pd.date_range(start=startdate, periods=24, freq='H'),
columns=['requests']
)
# Create a list of average hits for each hour in a given weekday:
hitmeans = df.loc[df['weekday'] == day].groupby('hour')['hits'].mean()
aux_df['requests'] = np.array(hitmeans).round().astype(int)
startdate += datetime.timedelta(days=1)
week_df = pd.concat([week_df, aux_df])
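# Hypothetical usage sketch (not part of the original module): `simulate`
# expects a datetime-indexed frame with an hourly 'hits' column, plus an
# optional total number of requests per month to scale to, e.g.:
#
#   hourly = pd.DataFrame(
#       {"hits": np.random.poisson(40, size=24 * 90)},
#       index=pd.date_range("2018-01-01", periods=24 * 90, freq="H"),
#   )
#   month_df = simulate(hourly, monthly_scale_factor=1_000_000)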
#!/usr/bin/env python
# coding: utf-8
# # EDA + CenterNet Baseline
#
# References:
# * Took 3D visualization code from https://www.kaggle.com/zstusnoopy/visualize-the-location-and-3d-bounding-box-of-car
# * CenterNet paper https://arxiv.org/pdf/1904.07850.pdf
# * CenterNet repository https://github.com/xingyizhou/CenterNet
# # What is this competition about?
# 1. You are given the images taken from the roof of a car
# * ~4k training images
# * Always the same car and the same camera
# 2. You are asked to detect other cars on that image
# * There can be many cars
# * You need to predict their positions
# 
#
# ## What is in this notebook?
# * Data distributions: 1D, 2D and 3D
# * Functions to transform between camera coordinates and road coordinates
# * Simple CenterNet baseline
#
# ## CenterNet
# This architecture predicts centers of objects as a heatmap.
# It predicts sizes of the boxes as a regression task.
# 
#
# It is also used for pose estimation:
# 
# *(images from the [original repository](https://github.com/xingyizhou/CenterNet))*
# Coordinates of human joints are also predicted using regression.
#
# I use this idea to predict `x, y, z` coordinates of the vehicle and also `yaw, pitch_cos, pitch_sin, roll` angles.
# For `pitch` I predict sin and cos, because, as we will see, this angle can be both near 0 and near 3.14.
# These 7 parameters are my regression target variables instead of `shift_x, shift_y, size_x, size_y`.
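# A small illustration of the angle encoding described above (my own sketch,
# not from the original notebook): `pitch` is regressed as a (cos, sin) pair so
# that values near 0 and near pi do not conflict, and is decoded back with
# `arctan2`.
#
#   def encode_angles(yaw, pitch, roll):
#       return [yaw, np.cos(pitch), np.sin(pitch), roll]
#
#   def decode_pitch(pitch_cos, pitch_sin):
#       return np.arctan2(pitch_sin, pitch_cos)
#
# Together with x, y, z this gives the 7 regression targets per object.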
# In[2]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
import os
from sklearn.model_selection import train_test_split
from scipy.optimize import minimize
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
PATH = '/home/hy/pkuad/'
os.listdir(PATH)
# # Load data
# In[3]:
train = pd.read_csv(PATH + 'train.csv')
test = pd.read_csv(PATH + 'sample_submission.csv')
import pandas as pd
import datetime
from pandas import DataFrame
from pandasql import sqldf
loc = locals()
def calculate_average_ticker_price(prices: {}, total_quantity: float) -> float:
"""
:param prices: a list of price * quantity needed to calculate the average price of each stock
:param total_quantity: the total amount of the stock held, required to calculate the average price per stock.
:return: the average price for that particular ticker stock given
1. the different purchase price
2. the different quantities purchased
"""
if total_quantity > 0:
total_price = sum(prices)
return total_price / total_quantity
def strip_action(action: str) -> str:
"""
removes whitespace and changes all characters to lower case
:param action: the name of the action taken on a position
:return: the input string minus the above mentioned
"""
action = action.replace(" ", "")
action = action.casefold()
return action
def profit_from_sale(ticker_number: str, sale_price: float, quantity: float, action: str) -> float:
"""
Calculates the amount of profit/loss realised from a sale of a stock.
:param ticker_number: ticker name of the stock
:param sale_price: sale/cover price of the stock
:param quantity: the number of stock sold/bought.
:param action: is this position a "longsell" or a "shortcover"
:return: profit/loss of the action taken
"""
if action == "longsell":
profit_or_loss = (sale_price - price_list[ticker_number]) * quantity
return profit_or_loss
elif action == "shortcover":
profit_or_loss = (price_list[ticker_number] - sale_price) * quantity
return profit_or_loss
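# Worked example (illustrative only): assuming price_list["AAPL"] == 100.0,
#   profit_from_sale("AAPL", 110.0, 5, "longsell")   -> (110 - 100) * 5 = 50.0
#   profit_from_sale("AAPL", 110.0, 5, "shortcover") -> (100 - 110) * 5 = -50.0
# i.e. a long position profits when the sale price is above the stored average
# purchase price, while a short position profits when covering below it.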
def date_remove_time(date: datetime) -> datetime:
"""
converts a datetime format of %Y-%m-%d %H:%M:%S.%f to %d/%m/%Y
:param date: date
:return: a cleaner date without the above mentioned
"""
return datetime.datetime.strptime(date,'%Y-%m-%d %H:%M:%S.%f').strftime('%d/%m/%Y')
myportfolio = pd.read_excel('portfoliodataset.xlsx', index_col=False)
# PANDAS SETTINGS
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
#!/usr/bin/python
print('Loading modules...')
import os, sys, getopt, datetime
import pickle as pkl
import pandas as pd
import numpy as np
from xgboost import XGBRegressor, XGBClassifier
from dairyml import XGBCombined
from skll.metrics import spearman, pearson
from sklearn.utils import shuffle
from sklearn.model_selection import cross_validate, RepeatedKFold
from sklearn.metrics import r2_score, make_scorer, mean_absolute_error
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import IsolationForest
from scoring import *
import warnings
warnings.filterwarnings("ignore")
results_dir = '../reports/'
if not os.path.exists(results_dir):
os.makedirs(results_dir)
def my_load_model(model_path):
print('Loading model at {}'.format(model_path))
if 'ffnn' in model_path:
from tensorflow.keras.models import load_model
model = load_model(model_path)
else:
with open(model_path, "rb" ) as f:
model = pkl.load(f)
return model
def scale_data(data):
print('Scaling input features...')
train_means = np.loadtxt('./scaling/train_feature_means.csv',delimiter=',')
train_vars = np.loadtxt('./scaling/train_feature_variances.csv',delimiter=',')
scaled_data = (data - train_means) / np.sqrt(train_vars)
return scaled_data
def get_model_predictions(model,X):
# Get model predictions
print('Testing the model... ')
# full predictions
predictions = pd.DataFrame(index=X.index)
"""
timedelta support tools
"""
import re
from datetime import timedelta
import numpy as np
import pandas.tslib as tslib
from pandas import compat, _np_version_under1p7
from pandas.core.common import (ABCSeries, is_integer, is_integer_dtype, is_timedelta64_dtype,
_values_from_object, is_list_like, isnull)
repr_timedelta = tslib.repr_timedelta64
repr_timedelta64 = tslib.repr_timedelta64
def to_timedelta(arg, box=True, unit='ns'):
"""
Convert argument to timedelta
Parameters
----------
arg : string, timedelta, array of strings (with possible NAs)
box : boolean, default True
If True returns a Series of the results, if False returns ndarray of values
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, which is an integer/float number
Returns
-------
ret : timedelta64/arrays of timedelta64 if parsing succeeded
"""
if _np_version_under1p7:
raise ValueError("to_timedelta is not support for numpy < 1.7")
def _convert_listlike(arg, box, unit):
if isinstance(arg, (list,tuple)):
arg = np.array(arg, dtype='O')
if is_timedelta64_dtype(arg):
# -*- coding: utf-8 -*-
"""
Created on Sat May 5 00:27:52 2018
@author: sindu
About: Feature Selection on Genome Data"""
import pandas as pd
import numpy as np
import math
import operator
from sklearn import metrics
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn import linear_model
filename = 'GenomeTrainXY.txt'
data = pd.read_csv('GenomeTrainXY.txt', header=-1).as_matrix()
testDataFile = "GenomeTestX.txt"
testData = pd.read_csv("GenomeTestX.txt", header=-1).as_matrix()
headerinfo = data[0]
classlabelinfo = list(set(headerinfo))
clbl, clblcnt = np.unique(headerinfo, return_counts=True)
classlabelcountinfo = dict(zip(clbl, clblcnt))
n_genomesize = len(headerinfo)
k_groupsize = len(clbl)
df = pd.DataFrame(data)
dftranspose = df.transpose()
fscores = pd.DataFrame()
fscorenumval = None
fscoredenom = None
fscorenumdf = pd.DataFrame()
fscoredenomdf = pd.DataFrame()
#calculate mean of all features for a specific class label
featuremeandata = df.transpose().groupby(dftranspose[:][0]).mean()
featuremeandata = featuremeandata.loc[:, 1:]
centroidData = featuremeandata.transpose().as_matrix()
#calculate variance of all features for a specific class label
featurevardata = df.transpose().groupby(dftranspose[:][0]).var()
featurevardata = featurevardata.loc[:, 1:]
#calculate average of each of the feature
featureavg = df.mean(axis=1)
featureavgdata = pd.DataFrame(featureavg).transpose()
featureavgdata = featureavgdata.loc[:, 1:]
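# Sketch (my own formulation, not necessarily the statistic used later in this
# script): combine the per-class means/variances and the overall means above
# into a per-feature F-like score,
#   F(j) = sum_k n_k * (mean_kj - mean_j)^2 / sum_k (n_k - 1) * var_kj,
# where n_k is the size of class k.
classcounts = pd.Series(classlabelcountinfo)
fscore_sketch = (
    (featuremeandata.sub(featureavgdata.iloc[0], axis=1) ** 2)
    .mul(classcounts, axis=0)
    .sum(axis=0)
    / featurevardata.mul(classcounts - 1, axis=0).sum(axis=0)
)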
def getfeaturemeandata(classlblval, val):
meanrowdata = pd.DataFrame()
meanrowdatabyvalue = pd.DataFrame()
meannumdata = pd.DataFrame()
# -*- coding: utf-8 -*-
# Original Code by <NAME> for VOST Portugal
# 18 MAR 2022
# -----------------------------------------------
# LIBRARIES
# -----------------------------------------------
# Import Dash and Dash Bootstrap Components
import dash
import dash_bootstrap_components as dbc
from dash import Input, Output, dcc, html
# Import Core Libraries
import pandas as pd
import plotly.express as px
# -----------------------------------------------
# APP STARTS HERE
# -----------------------------------------------
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP], title='CONFIRM - BAJATT 2022', update_title=None,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=0.7, minimum-scale=0.4"}],
)
server = app.server
CONFIRM_LOGO = app.get_asset_url('CONFIRM_Logotype.png')
color_map = {
"WARNING":"#C81D25",
"ACIDENTE":"#4F5D75",
"AVARIA MECÂNICA":"#DE6E4B",
"DESISTÊNCIA CONFIRMADA":"#2D3142",
"DESISTÊNCIA NÃO CONFIRMADA":"#242424"
}
app.layout = dbc.Container(
[
dbc.Row(
[
# AUTOMATIC UPDATER
dcc.Interval(
id='interval-component',
interval=20*1000, # in milliseconds
n_intervals=0
),
dbc.Col(
[
dbc.Row(
[
dbc.Row(html.Hr()),
dbc.Col(width=2,xs=12, sm=12,md=1,lg=1,xl=1),
dbc.Col(html.H3("BAJA TT 2022"),width=4,xs=12, sm=12,md=4,lg=4,xl=4),
dbc.Col(width=4,xs=12, sm=12,md=1,lg=4,xl=4),
dbc.Col(html.Img(src=CONFIRM_LOGO, height="37px"),width=2,xs=12, sm=12,md=1,lg=1,xl=1), # CONFIRM LOGO - DO NOT REMOVE
],
),
],
),
dbc.Row(
[
dbc.Col(width=2,xs=12, sm=12,md=1,lg=2,xl=1),
dbc.Col(
html.P("CONFIRM by VOST PORTUGAL ")
),
],
),
],
style={"height": "20%", "background-color": "#1D1E2C"},
),
dbc.Row(
[
dbc.Col(
dcc.Graph(id='map'), width=2,xs=12, sm=12,md=12,lg=12,xl=4,
),
dbc.Col(
dbc.Row(
[
dbc.Card(
[
dbc.CardHeader("TOTAL INCIDENTS", style={"background": "#FF495C","color":"white"}),
dbc.CardBody(
[
html.H6("TOTAL INCIDENTES", style={"color":"#FF495C"}, className="card-title"),
html.H4(id="totals"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("TOTAL WARNINGS", style={"background": "#C81D25","color":"white"}),
dbc.CardBody(
[
html.H6("RACE DIRECTOR", style={"color":"#C81D25"}, className="card-title"),
html.H4(id="total_warnings"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("BREAKDOWNS", style={"background": "#DE6E4B","color":"white"}),
dbc.CardBody(
[
html.H6("AVARIAS", style={"color":"#DE6E4B"}, className="card-title"),
html.H4(id="total_breakdowns"),
],
),
],
),
],
),
width=2,xs=12, sm=12,md=12,lg=6,xl=2,
),
dbc.Col(
dbc.Row(
[
dbc.Card(
[
dbc.CardHeader("ACCIDENTS", style={"background": "#4F5D75","color":"white"}),
dbc.CardBody(
[
html.H6("ACIDENTES", style={"color":"#4F5D75"}, className="card-title"),
html.H4(id="total_accidents"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("CONFIRMED OUT OF RACE", style={"background": "#2D3142","color":"white"}),
dbc.CardBody(
[
html.H6("DESISTÊNCIA", style={"color":"#2D3142"}, className="card-title"),
html.H4(id="total_gaveup_confirmed"),
],
),
],
),
dbc.Card(
[
dbc.CardHeader("NON-CONFIRMED OUT OF RACE", style={"background": "#242424","color":"white"}),
dbc.CardBody(
[
html.H6("DESISTÊNCIA NC", style={"color":"#242424"}, className="card-title"),
html.H4(id="total_gaveup_nconfirmed"),
],
),
],
),
],
),
width=2,xs=12, sm=12,md=12,lg=6,xl=2,
),
dbc.Col(
dbc.Row(dcc.Graph(id='pie')),
width=3,xs=12, sm=12,md=12,lg=12,xl=3,
),
],
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(dcc.Graph(id='timeline'))
],
),
],
style={"height": "10%", "background-color": "#242424"},
),
dbc.Row(
[
dbc.Col(
[
dbc.Row(
[
dbc.Col(width=4,xs=12, sm=12,md=4,lg=4,xl=4),
dbc.Col(
dbc.Row(
[
dbc.Row(dbc.Col(width=12),),
dbc.Row(html.H6("POWERED BY VOST PORTUGAL",style={"align":"center"}),),
dbc.Row(html.H6("VOST PORTUGAL for ACP MOTORSPORTS",style={"align":"center"}),),
dbc.Row(html.H6("CC BY-NC-SA 2022",style={"align":"center"}),),
],
),
),
],
style={"height": "20%", "background-color": "#242424"},
),
],
),
],
style={"height": "30%", "background-color": "#242424"},
),
],
style={"width":"100vw","height": "97vh"},
)
# DEFINE CALL BACKS
@app.callback(
Output(component_id="map",component_property="figure"),
Output(component_id="totals",component_property="children"),
Output(component_id="total_warnings",component_property="children"), # returns variable
Output(component_id="total_breakdowns",component_property="children"),
Output(component_id="total_accidents",component_property="children"),
Output(component_id="total_gaveup_confirmed",component_property="children"), # returns variable
Output(component_id="total_gaveup_nconfirmed",component_property="children"), # returns table # returns table
Output(component_id="pie",component_property="figure"),
Output(component_id="timeline",component_property="figure"),
Input(component_id="interval-component", component_property="n_intervals"), # Triggers Call Back based on time update
)
# WHAT HAPPENS WHEN CALL BACK IS TRIGGERED
def confirmUupdate(value):
# DATA TREATMENT
df_ss1_cc = pd.read_csv('ss1_cc.csv')
df_live_incidents = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=812677681&single=true&output=csv')
df_live_cc = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=1268287201&single=true&output=csv')
df_live_warnings = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-<KEY>_/pub?gid=1026955157&single=true&output=csv')
df_live_incidents = df_live_incidents.dropna()
df_live_cc = df_live_cc.dropna()
df_live_warnings = df_live_warnings.dropna()
totals = str(round(df_live_incidents['total_incidents'].sum()))
total_warnings = df_live_warnings.loc[df_live_warnings['type'] == "WARNING", 'count'].sum()
total_breakdowns = df_live_incidents.loc[df_live_incidents['type'] == "AVARIA MECÂNICA", 'total_incidents'].sum()
total_accidents = df_live_incidents.loc[df_live_incidents['type'] == "ACIDENTE", 'total_incidents'].sum()
total_gaveup_confirmed = df_live_incidents.loc[df_live_incidents['type'] == "DESISTÊNCIA CONFIRMADA", 'total_incidents'].sum()
total_gaveup_nconfirmed = df_live_incidents.loc[df_live_incidents['type'] == "DESISTÊNCIA NÃO CONFIRMADA", 'total_incidents'].sum()
fig = px.scatter_mapbox(df_live_cc, lat="lat", lon="lon", size='reports',hover_name="reporter", hover_data=["reports"],
color_discrete_sequence=["red"], zoom=8)
fig.update_layout(mapbox_style="carto-darkmatter")
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
pie_chart = px.pie(df_live_incidents,names='type',values='total_incidents',hole=0.6,color='type',color_discrete_map=color_map)
pie_chart.update_layout(showlegend=False)
# TIMELINE
df_tl = pd.read_csv('https://docs.google.com/spreadsheets/d/e/2PACX-1vT_L10XsTy6OEUN6OOOdEbLDeMzAW000x2bmgXF5acnOY6v8lJpooMiOg4uFQ3e3CI2MfFdDB07I5X_/pub?gid=516767329&single=true&output=csv')
df_tl=df_tl.dropna()
df_tl['timestamp'] = pd.to_datetime(df_tl['timestamp'])
import numpy as np
import pandas
import random
import re
import sys
from scipy.stats import pearsonr, spearmanr
# auxiliary functions
def buildSeriesByCategory(df, categories):
res = []
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
res.append(occ)
res_series = pandas.Series(res, index=categories)
return res_series
# convert dataframe into a format that can be used to generate a likert plot
def convertToRLikertFormat(dataframe):
# get rows from dataframe
version_series = dataframe.loc["Versioning"]
manual_series = dataframe.loc["Manual-Job"]
retry_series = dataframe.loc["Job-Retry"]
allow_series = dataframe.loc["Job-Allow-Failure"]
overall_series = dataframe.loc["overall"]
# convert to R format and fill the dictionary
dict_of_columns = {}
dict_of_columns['Fuzzy Version'] = fillList(version_series)
dict_of_columns['Manual Execution'] = fillList(manual_series)
dict_of_columns['Retry Failure'] = fillList(retry_series)
dict_of_columns['Fake Success'] = fillList(allow_series)
dict_of_columns['Overall'] = fillList(overall_series)
# merge everything in one dataframe
result = pandas.DataFrame(dict_of_columns)
return result
def fillList(series):
list = []
for label, value in series.items():
if label != "reacted":
num = value
while num > 0:
list.append(label)
num = num - 1
return pandas.Series(list)
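# Illustrative example (not in the original script): `fillList` expands a
# series of per-category counts into the long format expected by the R likert
# plotting code, e.g.
#   fillList(pandas.Series({"agree": 2, "disagree": 1, "reacted": 5}))
#   -> pandas.Series(["agree", "agree", "disagree"])
# ("reacted" rows are bookkeeping and are skipped)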
def printStatsByCategory(df, categories):
for cat in categories:
occ = df.loc[df["category"] == cat].shape[0]
print(cat + ": " + str(occ))
def buildOverallResultSeries(resulting_series):
res_list = []
for label, serie in resulting_series.items():
sum = 0;
for value in serie:
sum += value
res_list.append(sum)
return res_list
def fillTwoLists(typ, cat, labels_string, cat_list, label_list):
labels = labels_string.split(",")
for label in labels:
if label != 'nan':
cat_list.append(cat)
label_list.append(typ + ":" + label.strip())
def computeProjectSet(list, splitRule, categories):
resulting_set = set()
base_link = "https://gitlab.com"
for name in list:
if splitRule != "":
splittedName = name.split(splitRule)
new_name = base_link
for token in splittedName:
if token in categories:
break
else:
token = re.sub(r"[ <>#%\"{}|^'`;\[\]/?:@&=+$,\.()\\\\]", my_replace, token)
new_name += "/" + token
resulting_set.add(new_name)
return resulting_set
def my_replace(match):
return "-"
def printOccurrences(antipatterns, tot_smells, tot_ana_projects, tot_ana_owners):
num_smells = antipatterns['ID'].shape[0]
percentage_num_smells = round(num_smells / tot_smells * 100, 1)
print("#smells: " + str(num_smells) + "(" + str(percentage_num_smells) + "%)")
num_smelly_projects = antipatterns['Project'].unique().shape[0]
percentage_num_smelly_projects = round(num_smelly_projects / tot_ana_projects * 100, 1)
print("#smelly-projects: " + str(num_smelly_projects) + "(" + str(percentage_num_smelly_projects) + "%)")
num_smelly_owners = antipatterns['Owner'].unique().shape[0]
percentage_num_smelly_owners = round(num_smelly_owners / tot_ana_owners * 100, 1)
print("#smelly-owners: " + str(num_smelly_owners) + "(" + str(percentage_num_smelly_owners) + "%)")
def printOccurrencesPerCluster(apdf, tot_smells, tot_ana_projects, tot_ana_owners, tot_ana_projects_versioning,
tot_ana_owners_versioning):
print("\n-> Versioning")
versioning = apdf.loc[apdf["Category"] == "Versioning"]
printOccurrences(versioning, tot_smells, tot_ana_projects_versioning, tot_ana_owners_versioning)
print("\n-> Job-Allow-Failure")
allow_failure = apdf.loc[apdf["Category"] == "Job-Allow-Failure"]
printOccurrences(allow_failure, tot_smells, tot_ana_projects, tot_ana_owners)
print("\n-> Job-Retry")
retry = apdf.loc[apdf["Category"] == "Job-Retry"]
printOccurrences(retry, tot_smells, tot_ana_projects, tot_ana_owners)
print("\n-> Manual-Job")
manual = apdf.loc[apdf["Category"] == "Manual-Job"]
printOccurrences(manual, tot_smells, tot_ana_projects, tot_ana_owners)
def printOccurrences2(df, tot):
num_smells = df['ID'].shape[0]
percentage_num_smells = round(num_smells / tot * 100, 1)
print("#smells: " + str(num_smells) + "(" + str(percentage_num_smells) + "%)")
def printOccurrencesPerCluster2(apdf, tot_versioning, tot_allow, tot_retry, tot_manual):
print("-> Versioning")
versioning = apdf.loc[apdf["Category"] == "Versioning"]
printOccurrences2(versioning, tot_versioning)
print("-> Job-Allow-Failure")
allow_failure = apdf.loc[apdf["Category"] == "Job-Allow-Failure"]
printOccurrences2(allow_failure, tot_allow)
print("-> Job-Retry")
retry = apdf.loc[apdf["Category"] == "Job-Retry"]
printOccurrences2(retry, tot_retry)
print("-> Manual-Job")
manual = apdf.loc[apdf["Category"] == "Manual-Job"]
printOccurrences2(manual, tot_manual)
def versioning_occurrences_byfile(smell):
tot_incidents = smell.shape[0]
affected_files = smell["Remote Configuration File Link"].unique().shape[0]
yml_incidents = smell.loc[smell["Configuration File Name"] == ".gitlab-ci.yml"]
tot_yml_incidents = yml_incidents.shape[0]
tot_affected_yml = yml_incidents["Remote Configuration File Link"].unique().shape[0]
req_incidents = smell.loc[smell["Configuration File Name"] == "requirements.txt"]
tot_req_incidents = req_incidents.shape[0]
tot_affected_req = req_incidents["Remote Configuration File Link"].unique().shape[0]
pom_incidents = smell.loc[(smell["Configuration File Name"] != ".gitlab-ci.yml") &
(smell["Configuration File Name"] != "requirements.txt")]
tot_pom_incidents = pom_incidents.shape[0]
tot_affected_pom = pom_incidents["Remote Configuration File Link"].unique().shape[0]
print("tot_incidents: " + str(tot_incidents))
print("affected_files: " + str(affected_files))
print("tot_yml_incidents: " + str(tot_yml_incidents) + "(" + str(
round(tot_yml_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_yml_files: " + str(tot_affected_yml))
print("tot_req_incidents: " + str(tot_req_incidents) + "(" + str(
round(tot_req_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_req_files: " + str(tot_affected_req))
print("tot_pom_incidents: " + str(tot_pom_incidents) + "(" + str(
round(tot_pom_incidents / tot_incidents * 100, 2)) + "%)")
print("affected_pom_files: " + str(tot_affected_pom))
# RQ1 data analysis
def rqone_results(issueReportFile, path):
with open(issueReportFile, 'r', encoding="utf-8") as input_file:
frame = pandas.read_csv(input_file) # read file in a dataframe
tot_opened_issues = frame.loc[pandas.notna(frame["category"]) &
(frame["category"] != "Vulnerability") &
(frame["category"] != "Job-Retry (duplicate)")].shape[0]
print("### Tot opened issues: " + str(tot_opened_issues))
# select only active projects
df = frame.loc[(frame["commitsSinceIssue"] >= 1) &
(frame["category"] != "Vulnerability") &
pandas.notna(frame["category"]) &
(frame["category"] != "Job-Retry (duplicate)")]
categories = df.category.unique()
print("\n#### Opened issues (active projects since September 2019) ####")
openend_series = buildSeriesByCategory(df, categories)
printStatsByCategory(df, categories)
activeProjects = df.shape[0]
print("Total: " + str(activeProjects) + " (" +
str(tot_opened_issues - activeProjects) + " inactive)")
print("\n#### Issues (with a reaction) ####")
# select rows by column values
react_df = df.loc[(df["fixed (y/n/m)"] == "y") |
(df["numUpvotes"] > 0) |
(df["numDownvotes"] > 0) |
((pandas.notna(df["reaction"])
# -*- coding: utf-8 -*-
# -*- python 3 -*-
# -*- <NAME> -*-
# Import packages
import re
import numpy as np
import pandas as pd
import os ##for directory
import sys
import pprint
'''General helper functions for easy use of Python'''
def splitAndCombine(gene, rxn, sep0, moveDuplicate=False):
## one rxn has several genes; this function is used to split the genes
## used for the dataframe data
gene = gene.fillna('NA') # fill the NaN with 'NA'
gene0 = gene.tolist()
rxn0 = rxn.tolist()
s1 = list()
s2 = list()
for i in range(len(gene0)):
s1 = s1 + [rxn0[i]] * len(gene0[i].split(sep0))
s2 = s2 + gene0[i].split(sep0)
df0 = pd.DataFrame({'V1': s1,
'V2': s2}
)
if moveDuplicate == True:
df00 = df0.drop_duplicates()
else:
df00 = df0
return df00
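'''
# example usage (illustrative data, not from the original model files):
# splits a multi-gene GPR string into one row per gene-reaction pair
rxn_df = pd.DataFrame({'rxn': ['r1', 'r2'],
                       'gene': ['g1;g2', 'g3']})
splitAndCombine(rxn_df['gene'], rxn_df['rxn'], sep0=';')
#   V1  V2
#   r1  g1
#   r1  g2
#   r2  g3
'''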
def getSimilarTarget(rxn_yeast0,rxn_newGPR0,ss):
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
rxn_yeast1 = np.array(rxn_yeast0) # np.ndarray()
rxn_yeast2 = rxn_yeast1.tolist()
rxn_yeast3 = pd.Series((v[0] for v in rxn_yeast2))
rxn_newGPR1 = np.array(rxn_newGPR0) # np.ndarray()
rxn_newGPR2 = rxn_newGPR1.tolist()
rxn_newGPR3 = pd.Series((v[0] for v in rxn_newGPR2))
similarTarget = [None] * ss
for i in range(ss):
similarTarget[i] = process.extract(rxn_newGPR3[i], rxn_yeast3, limit=2)
return similarTarget
'''
#example
newMet = pd.read_excel('new metabolite for check.xlsx')
newMet0 = newMet[['name_unify']]
gemMet = pd.read_excel('unique metabolite in yeastGEM.xlsx')
gemMet0 = gemMet[['Description_simple']]
ss0 = len(newMet0)
similarTarget0 = getSimilarTarget(gemMet0,newMet0,ss=ss0)
'''
def singleMapping (description, item1, item2, dataframe=True):
"""get the single description of from item1 for item2 based on mapping"""
#description = w
#item1 = v
#item2 = testData
# used for the list data
if dataframe:
description = description.tolist()
item1 = item1.tolist()
item2 = item2.tolist()
else:
pass
index = [None]*len(item2)
result = [None]*len(item2)
tt = [None]*len(item2)
for i in range(len(item2)):
if item2[i] in item1:
index[i] = item1.index(item2[i])
result[i] = description[index[i]]
else:
index[i] = None
result[i] = None
return result
'''
w=['a','b','c']
v=[1,2,3]
s=[3,1,2,4]
singleMapping(w,v,s,dataframe=False)
'''
def multiMapping (description, item1, item2, dataframe=True, sep=";", removeDuplicates=True):
"""get multiple description of from item1 for item2 based on mapping"""
#description = w
#item1 = v
#item2 = testData
#used for the list data
if dataframe:
description = description.tolist()
item1 = item1.tolist()
item2 = item2.tolist()
else:
pass
result = [None]*len(item2)
for i in range(len(item2)):
if item2[i] in item1:
index0 = [description[index] for index in range(len(item1)) if item1[index] == item2[i]]
if removeDuplicates:
index1 = pd.unique(index0).tolist()
else:
index1 = index0
result[i] = sep.join(str(e) for e in index1) #string cat
else:
result[i] = None
return result
'''
# example data to test all the above function
df1 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2}
)
df2 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'D' : np.random.randn(12)})
df2['C'] = singleMapping(df1['C'], df1['A'], df2['A'])
df2['C'] = multiMapping(df1['C'], df1['A'], df2['A'])
'''
def updateOneColumn(df1, df2, key0, value0):
"""
using dataframe df2 to update the df1
:param df1:
:param df2:
:param key0: the common column name, a string, used for the mapping
:param value0: the column in df2 used to update the df1
:return:
example
df10 = pd.DataFrame({'A': ['a', 'b', 'c'],
'B': ['x', 'y', 'z']})
df20 = pd.DataFrame({'A':['c','b'],
'B': ['e', 'd']})
updateOneColumn(df10,df20,key0='A',value0='B')
"""
df10 = df1.copy()
df11 = df1.copy()
df10[value0] = multiMapping(df2[value0], df2[key0], df10[key0])
for i, x in df10.iterrows():
print(x[value0])
if x[value0] is None:
df11[value0][i] = df11[value0][i]
else:
df11[value0][i] = df10[value0][i]
return df11[value0]
def RemoveDuplicated(s1):
"""
example:
s1=['a // a', 'b // a', None, 'non']
"""
s2=list()
for x in s1:
print(x)
if x =='non':
s2.append('')
elif x is None:
s2.append('')
else:
if "//" in x:
s0= x.split(' // ')
s0 = [x.strip() for x in s0]
s01= list(set(s0))
if len(s01)==1:
s2.append(s01[0])
else:
s2.append(' // '.join(s01))
else:
s2.append(x)
return s2
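'''
#example usage (a sketch; set() does not preserve order, so the merged entry
#'b // a' may come back as 'a // b'):
s1 = ['a // a', 'b // a', None, 'non']
s2 = RemoveDuplicated(s1)
# s2 -> ['a', 'b // a', '', '']
'''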
def nz(value):
'''
Convert None to string else return value.
'''
if value == None:
return 'none'
return value
def AutoUpdate(description1, para1, description2, para2):
# using the description1 in para1 to update the description2 in para2
description1 = description1.tolist()
para1 = para1.tolist()
description2 = description2.tolist()
para2 = para2.tolist()
ss = [None]*len(para2)
for i in range(len(para2)):
if para2[i] in para1:
ss[i] = para1.index(para2[i])
else:
ss[i] = None
for i in range(len(para2)):
if ss[i] != None:
description2[i] = description1[ss[i]]
else:
description2[i] = description2[i]
return description2
'''
# example data to test the followed function
df1 = pd.DataFrame({'A' : ['one', 'one', 'two', 'three'] * 3,
'B' : ['A', 'B', 'C'] * 4,
'C' : ['foo', 'foo', 'foo', 'bar', 'bar', 'bar'] * 2}
)
df2 = df1.iloc[[1,2]]
df2['C'] = ['good','good']
df1['C'] = AutoUpdate(df2['C'],df2['A'],df1['C'],df1['A'])
'''
def calculateFrequency(list0, item0):
'''
    Calculate the frequency of items occurring in a list and turn the frequency list into a dataframe.
:param list0: ['a','b','a']
:param item0:
:return: a dataframe with two columns
'''
    summary = pd.Series(list0)
    # NOTE: the original body ends here in the source; the lines below are a sketch
    # based on the docstring (item0 is assumed to be the label column name next to a
    # frequency count) rather than the original code
    frequency = summary.value_counts().reset_index()
    frequency.columns = [item0, 'frequency']
    return frequency
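'''
#example usage (a sketch that relies on the reconstructed body above; using item0
#as the label column name is an assumption, not part of the original code):
freq = calculateFrequency(['a', 'b', 'a'], item0='gene')
# freq is a two-column dataframe: gene ('a', 'b') and frequency (2, 1)
'''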
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# You can run this test by first running `nPython.exe` (with mono or otherwise):
# $ ./nPython.exe ReportChartTests.py
import numpy as np
import pandas as pd
from datetime import datetime
from ReportCharts import ReportCharts
charts = ReportCharts()
## Test GetReturnsPerTrade
backtest = list(np.random.normal(0, 1, 1000))
live = list(np.random.normal(0.5, 1, 400))
result = charts.GetReturnsPerTrade([], [])
result = charts.GetReturnsPerTrade(backtest, [])
result = charts.GetReturnsPerTrade(backtest, live)
## Test GetCumulativeReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
strategy = np.linspace(1, 25, 365)
benchmark = np.linspace(2, 26, 365)
backtest = [time, strategy, time, benchmark]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01T00:00:00', periods=50)]
strategy = np.linspace(25, 29, 50)
benchmark = np.linspace(26, 30, 50)
live = [time, strategy, time, benchmark]
result = charts.GetCumulativeReturns()
result = charts.GetCumulativeReturns(backtest)
result = charts.GetCumulativeReturns(backtest, live)
## Test GetDailyReturnsPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01T00:00:00', periods=365)]
data = list(np.random.normal(0, 1, 365))
backtest = [time, data]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01T00:00:00', periods=120)]
data = list(np.random.normal(0.5, 1.5, 120))
live = [time, data]
empty = [[], []]
result = charts.GetDailyReturns(empty, empty)
result = charts.GetDailyReturns(backtest, empty)
result = charts.GetDailyReturns(backtest, live)
## Test GetMonthlyReturnsPlot
backtest = {'2016': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5],
'2017': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5][::-1]}
live = {'2018': [0.5, 0.7, 0.2, 0.23, 1.3, 1.45, 1.67, -2.3, -0.5, 1.23, 1.23, -3.5],
'2019': [1.5, 2.7, -3.2, -0.23, 4.3, -2.45, -1.67, 2.3, np.nan, np.nan, np.nan, np.nan]}
result = charts.GetMonthlyReturns({}, {})
result = charts.GetMonthlyReturns(backtest, pd.DataFrame())
result = charts.GetMonthlyReturns(backtest, live)
## Test GetAnnualReturnsPlot
time = ['2012', '2013', '2014', '2015', '2016']
strategy = list(np.random.normal(0, 1, 5))
backtest = [time, strategy]
time = ['2017', '2018']
strategy = list(np.random.normal(0.5, 1.5, 2))
live = [time, strategy]
empty = [[], []]
result = charts.GetAnnualReturns()
result = charts.GetAnnualReturns(backtest)
result = charts.GetAnnualReturns(backtest, live)
## Test GetDrawdownPlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01', periods=365)]
data = list(np.random.uniform(-5, 0, 365))
backtest = [time, data]
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01', periods=100)]
data = list(np.random.uniform(-5, 0, 100))
live = [time, data]
worst = [{'Begin': datetime(2012, 10, 1), 'End': datetime(2012, 10, 11)},
{'Begin': datetime(2012, 12, 1), 'End': datetime(2012, 12, 11)},
{'Begin': datetime(2013, 3, 1), 'End': datetime(2013, 3, 11)},
{'Begin': datetime(2013, 4, 1), 'End': datetime(2013, 4, 1)},
{'Begin': datetime(2013, 6, 1), 'End': datetime(2013, 6, 11)}]
empty = [[], []]
result = charts.GetDrawdown(empty, empty, {})
result = charts.GetDrawdown(backtest, empty, worst)
result = charts.GetDrawdown(backtest, live, worst)
## Test GetCrisisPlots (backtest only)
equity = list(np.linspace(1, 25, 365))
benchmark = list(np.linspace(2, 26, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365)]
backtest = [time, equity, benchmark]
empty = [[], [], []]
result = charts.GetCrisisEventsPlots(empty, 'empty_crisis')
result = charts.GetCrisisEventsPlots(backtest, 'dummy_crisis')
## Test GetRollingBetaPlot
empty = [[], [], [], []]
twelve = [np.nan for x in range(180)] + list(np.random.uniform(-1, 1, 185))
six = list(np.random.uniform(-1, 1, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365)]
backtest = [time, six, twelve]
result = charts.GetRollingBeta([time, six, time, twelve], empty)
result = charts.GetRollingBeta([time, six, [], []], empty)
result = charts.GetRollingBeta(empty, empty)
twelve = [np.nan for x in range(180)] + list(np.random.uniform(-1, 1, 185))
six = list(np.random.uniform(-1, 1, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2013-10-01 00:00:00', periods=365)]
live = [time, six, time, twelve]
result = charts.GetRollingBeta(live)
## Test GetRollingSharpeRatioPlot
data = list(np.random.uniform(1, 3, 365 * 2))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2012-10-01 00:00:00', periods=365 * 2)]
backtest = [time, data]
data = list(np.random.uniform(1, 3, 365))
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2014-10-01 00:00:00', periods=365)]
live = [time, data]
empty = [[], []]
result = charts.GetRollingSharpeRatio(empty, empty)
result = charts.GetRollingSharpeRatio(backtest, empty)
result = charts.GetRollingSharpeRatio(backtest, live)
## Test GetAssetAllocationPlot
backtest = [['SPY', 'IBM', 'NFLX', 'AAPL'], [0.50, 0.25, 0.125, 0.125]]
live = [['SPY', 'IBM', 'AAPL'], [0.4, 0.4, 0.2]]
empty = [[], []]
result = charts.GetAssetAllocation(empty, empty)
result = charts.GetAssetAllocation(backtest, empty)
result = charts.GetAssetAllocation(backtest, live)
## Test GetLeveragePlot
backtest = [[pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2014-10-01', periods=365)],
list(np.random.uniform(0.5, 1.5, 365))]
live = [[pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2015-10-01', periods=100)],
list(np.random.uniform(0.5, 2, 100))]
empty = [[], []]
result = charts.GetLeverage(empty, empty)
result = charts.GetLeverage(backtest, empty)
result = charts.GetLeverage(backtest, live)
## Test GetExposurePlot
time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2014-10-01', periods=365)]
long_securities = ['Equity']
short_securities = ['Forex']
long = [np.random.uniform(0, 0.5, 365)]
short = [np.random.uniform(-0.5, 0, 365)]
live_time = [pd.Timestamp(x).to_pydatetime() for x in pd.date_range('2015-10-01', periods=100)]
# Created by <NAME>
# email : <EMAIL>
import json
import os
import time
from concurrent import futures
from copy import deepcopy
from pathlib import Path
from typing import IO, Union, List
from collections import defaultdict
import re
from itertools import tee
import logging
# Non standard libraries
import pandas as pd
from urllib import parse
from aanalytics2 import config, connector, token_provider
from .projects import *
from .requestCreator import RequestCreator
JsonOrDataFrameType = Union[pd.DataFrame, dict]
JsonListOrDataFrameType = Union[pd.DataFrame, List[dict]]
def retrieveToken(verbose: bool = False, save: bool = False, **kwargs)->str:
"""
LEGACY retrieve token directly following the importConfigFile or Configure method.
"""
token_with_expiry = token_provider.get_token_and_expiry_for_config(config.config_object,**kwargs)
token = token_with_expiry['token']
config.config_object['token'] = token
config.config_object['date_limit'] = time.time() + token_with_expiry['expiry'] / 1000 - 500
config.header.update({'Authorization': f'Bearer {token}'})
if verbose:
print(f"token valid till : {time.ctime(time.time() + token_with_expiry['expiry'] / 1000)}")
return token
class Login:
"""
    Class to connect to the login company.
"""
loggingEnabled = False
logger = None
def __init__(self, config: dict = config.config_object, header: dict = config.header, retry: int = 0,loggingObject:dict=None) -> None:
"""
        Instantiate the Login class.
Arguments:
config : REQUIRED : dictionary with your configuration information.
header : REQUIRED : dictionary of your header.
            retry : OPTIONAL : the number of times to retry a failed request
loggingObject : OPTIONAL : If you want to set logging capability for your actions.
"""
if loggingObject is not None and sorted(["level","stream","format","filename","file"]) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}.login")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(
config_object=config, header=header, retry=retry,loggingEnabled=self.loggingEnabled,logger=self.logger)
self.header = self.connector.header
self.COMPANY_IDS = {}
self.retry = retry
def getCompanyId(self,verbose:bool=False) -> dict:
"""
Retrieve the company ids for later call for the properties.
"""
if self.loggingEnabled:
self.logger.debug("getCompanyId start")
res = self.connector.getData(
"https://analytics.adobe.io/discovery/me", headers=self.header)
json_res = res
if self.loggingEnabled:
self.logger.debug(f"getCompanyId reponse: {json_res}")
try:
companies = json_res['imsOrgs'][0]['companies']
self.COMPANY_IDS = json_res['imsOrgs'][0]['companies']
return companies
except:
if verbose:
print("exception when trying to get companies with parameter 'all'")
print(json_res)
if self.loggingEnabled:
self.logger.error(f"Error trying to get companyId: {json_res}")
return None
def createAnalyticsConnection(self, companyId: str = None,loggingObject:dict=None) -> object:
"""
Returns an instance of the Analytics class so you can query the different elements from that instance.
Arguments:
companyId: REQUIRED : The globalCompanyId that you want to use in your connection
loggingObject : OPTIONAL : If you want to set logging capability for your actions.
the retry parameter set in the previous class instantiation will be used here.
"""
analytics = Analytics(company_id=companyId,
config_object=self.connector.config, header=self.header, retry=self.retry,loggingObject=loggingObject)
return analytics
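# Minimal connection sketch (assumes the package configuration was already loaded,
# e.g. via importConfigFile; the 'globalCompanyId' key follows Adobe's Discovery API response):
'''
login = Login()
companies = login.getCompanyId()
myCompanyId = companies[0]['globalCompanyId']
ana = login.createAnalyticsConnection(companyId=myCompanyId)
'''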
class Analytics:
"""
Class that instantiate a connection to a single login company.
"""
# Endpoints
header = {"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "Bearer ",
"X-Api-Key": ""
}
_endpoint = 'https://analytics.adobe.io/api'
_getRS = '/collections/suites'
_getDimensions = '/dimensions'
_getMetrics = '/metrics'
_getSegments = '/segments'
_getCalcMetrics = '/calculatedmetrics'
_getUsers = '/users'
_getDateRanges = '/dateranges'
_getReport = '/reports'
loggingEnabled = False
logger = None
def __init__(self, company_id: str = None, config_object: dict = config.config_object, header: dict = config.header,
retry: int = 0,loggingObject:dict=None):
"""
Instantiate the Analytics class.
The Analytics class will be automatically connected to the API 2.0.
        You can review the connection details by looking into the connector instance.
        "header", "company_id" and "endpoint_company" are attributes accessible for debugging.
Arguments:
company_id : REQUIRED : company ID retrieved by the getCompanyId
            retry : OPTIONAL : Number of times you want to retry failed calls
loggingObject : OPTIONAL : logging object to log actions during runtime.
config_object : OPTIONAL : config object to be used for setting token (do not update if you do not know)
header : OPTIONAL : template header used for all requests (do not update if you do not know!)
"""
if company_id is None:
raise AttributeError(
'Expected "company_id" to be referenced.\nPlease ensure you pass the globalCompanyId when instantiating this class.')
if loggingObject is not None and sorted(["level","stream","format","filename","file"]) == sorted(list(loggingObject.keys())):
self.loggingEnabled = True
self.logger = logging.getLogger(f"{__name__}.analytics")
self.logger.setLevel(loggingObject["level"])
formatter = logging.Formatter(loggingObject["format"])
if loggingObject["file"]:
fileHandler = logging.FileHandler(loggingObject["filename"])
fileHandler.setFormatter(formatter)
self.logger.addHandler(fileHandler)
if loggingObject["stream"]:
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self.logger.addHandler(streamHandler)
self.connector = connector.AdobeRequest(
config_object=config_object, header=header, retry=retry,loggingEnabled=self.loggingEnabled,logger=self.logger)
self.header = self.connector.header
self.connector.header['x-proxy-global-company-id'] = company_id
self.header['x-proxy-global-company-id'] = company_id
self.endpoint_company = f"{self._endpoint}/{company_id}"
self.company_id = company_id
self.listProjectIds = []
self.projectsDetails = {}
self.segments = []
self.calculatedMetrics = []
try:
import importlib.resources as pkg_resources
pathLOGS = pkg_resources.path(
"aanalytics2", "eventType_usageLogs.pickle")
except ImportError:
try:
# Try backported to PY<37 `importlib_resources`.
import pkg_resources
pathLOGS = pkg_resources.resource_filename(
"aanalytics2", "eventType_usageLogs.pickle")
except:
print('Empty LOGS_EVENT_TYPE attribute')
try:
with pathLOGS as f:
self.LOGS_EVENT_TYPE = pd.read_pickle(f)
except:
self.LOGS_EVENT_TYPE = "no data"
def __str__(self)->str:
obj = {
"endpoint" : self.endpoint_company,
"companyId" : self.company_id,
"header" : self.header,
"token" : self.connector.config['token']
}
return json.dumps(obj,indent=4)
def __repr__(self)->str:
obj = {
"endpoint" : self.endpoint_company,
"companyId" : self.company_id,
"header" : self.header,
"token" : self.connector.config['token']
}
return json.dumps(obj,indent=4)
def refreshToken(self, token: str = None):
if token is None:
raise AttributeError(
'Expected "token" to be referenced.\nPlease ensure you pass the token.')
self.header['Authorization'] = "Bearer " + token
def decodeAArequests(self,file:IO=None,urls:Union[list,str]=None,save:bool=False,**kwargs)->pd.DataFrame:
"""
        Takes either a file or a list of Adobe Analytics request URLs and decomposes the requests into a dataframe, which you can optionally save.
Arguments:
file : OPTIONAL : file referencing the different requests saved (excel, or txt)
urls : OPTIONAL : list of requests (or a single request) that you want to decode.
            save : OPTIONAL : parameter to save your decoded list into a csv file.
Returns a dataframe.
possible kwargs:
encoding : the type of encoding to decode the file
"""
if self.loggingEnabled:
self.logger.debug(f"Starting decodeAArequests")
if file is None and urls is None:
raise ValueError("Require at least file or urls to contains data")
if file is not None:
if '.txt' in file:
with open(file,'r',encoding=kwargs.get('encoding','utf-8')) as f:
urls = f.readlines() ## passing decoding to urls
elif '.xlsx' in file:
temp_df = pd.read_excel(file,header=None)
urls = list(temp_df[0]) ## passing decoding to urls
if urls is not None:
if type(urls) == str:
data = parse.parse_qsl(urls)
df = pd.DataFrame(data)
df.columns = ['index','request']
df.set_index('index',inplace=True)
if save:
df.to_csv(f'request_{int(time.time())}.csv')
return df
elif type(urls) == list: ## decoding list of strings
tmp_list = [parse.parse_qsl(data) for data in urls]
tmp_dfs = [pd.DataFrame(data) for data in tmp_list]
tmp_dfs2 = []
for df, index in zip(tmp_dfs,range(len(tmp_dfs))):
df.columns = ['index',f"request {index+1}"]
## cleanup timestamp from request url
string = df.iloc[0,0]
df.iloc[0,0] = re.search('http.*://(.+?)/s[0-9]+.*',string).group(1) # tracking server
df.set_index('index',inplace=True)
new_df = df
tmp_dfs2.append(new_df)
df_full = pd.concat(tmp_dfs2,axis=1)
if save:
df_full.to_csv(f'requests_{int(time.time())}.csv')
return df_full
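    # Usage sketch for decodeAArequests ("ana" is assumed to be an Analytics instance
    # and the URL below is a made-up placeholder, not a real tracking call):
    # sample = "https://example.sc.omtrdc.net/b/ss/myrsid/1/JS-2.22.0/s123?AQB=1&pageName=home&AQE=1"
    # df_requests = ana.decodeAArequests(urls=[sample])
    # each request becomes one column of the returned dataframe, indexed by parameter name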
def getReportSuites(self, txt: str = None, rsid_list: str = None, limit: int = 100, extended_info: bool = False,
save: bool = False) -> list:
"""
Get the reportSuite IDs data. Returns a dataframe of reportSuite name and report suite id.
Arguments:
            txt : OPTIONAL : returns the reportSuites that match a specific text string
            rsid_list : OPTIONAL : returns the reportSuites that match the list of rsids set
            limit : OPTIONAL : How many reportSuites to retrieve per server call
save : OPTIONAL : if set to True, it will save the list in a file. (Default False)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getReportSuite")
nb_error, nb_empty = 0, 0 # use for multi-thread loop
params = {}
params.update({'limit': str(limit)})
params.update({'page': '0'})
if txt is not None:
params.update({'rsidContains': str(txt)})
if rsid_list is not None:
params.update({'rsids': str(rsid_list)})
params.update(
{"expansion": "name,parentRsid,currency,calendarType,timezoneZoneinfo"})
if self.loggingEnabled:
self.logger.debug(f"parameters : {params}")
rsids = self.connector.getData(self.endpoint_company + self._getRS,
params=params, headers=self.header)
content = rsids['content']
if not extended_info:
list_content = [{'name': item['name'], 'rsid': item['rsid']}
for item in content]
df_rsids = pd.DataFrame(list_content)
else:
df_rsids = pd.DataFrame(content)
total_page = rsids['totalPages']
last_page = rsids['lastPage']
if not last_page: # if last_page =False
callsToMake = total_page
list_params = [{**params, 'page': page}
for page in range(1, callsToMake)]
list_urls = [self.endpoint_company +
self._getRS for x in range(1, callsToMake)]
listheaders = [self.header for x in range(1, callsToMake)]
workers = min(10, total_page)
with futures.ThreadPoolExecutor(workers) as executor:
res = executor.map(lambda x, y, z: self.connector.getData(
x, y, headers=z), list_urls, list_params, listheaders)
res = list(res)
list_data = [val for sublist in [r['content']
for r in res if 'content' in r.keys()] for val in sublist]
nb_error = sum(1 for elem in res if 'error_code' in elem.keys())
nb_empty = sum(1 for elem in res if 'content' in elem.keys() and len(
elem['content']) == 0)
if not extended_info:
list_append = [{'name': item['name'], 'rsid': item['rsid']}
for item in list_data]
df_append = pd.DataFrame(list_append)
else:
df_append = pd.DataFrame(list_data)
df_rsids = df_rsids.append(df_append, ignore_index=True)
if save:
if self.loggingEnabled:
self.logger.debug(f"saving rsids : {params}")
df_rsids.to_csv('RSIDS.csv', sep='\t')
if nb_error > 0 or nb_empty > 0:
message = f'WARNING : Retrieved data are partial.\n{nb_error}/{len(list_urls) + 1} requests returned an error.\n{nb_empty}/{len(list_urls)} requests returned an empty response. \nTry to use filter to retrieve reportSuite or increase limit per request'
print(message)
if self.loggingEnabled:
self.logger.warning(message)
return df_rsids
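    # Usage sketch for getReportSuites ("ana" is assumed to be an Analytics instance):
    # rsids = ana.getReportSuites(txt="prod", extended_info=True)
    # rsids is a dataframe with one row per report suite (name, rsid and the expanded fields)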
def getVirtualReportSuites(self, extended_info: bool = False, limit: int = 100, filterIds: str = None,
idContains: str = None, segmentIds: str = None, save: bool = False) -> list:
"""
        Return a list of virtual reportSuites and their ids. It can contain more information if expansion is selected.
Arguments:
extended_info : OPTIONAL : boolean to retrieve the maximum of information.
            limit : OPTIONAL : How many virtual reportSuites to retrieve per server call
filterIds : OPTIONAL : comma delimited list of virtual reportSuite ID to be retrieved.
idContains : OPTIONAL : element that should be contained in the Virtual ReportSuite Id
segmentIds : OPTIONAL : comma delimited list of segmentId contained in the VRSID
save : OPTIONAL : if set to True, it will save the list in a file. (Default False)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getVirtualReportSuites")
expansion_values = "globalCompanyKey,parentRsid,parentRsidName,timezone,timezoneZoneinfo,currentTimezoneOffset,segmentList,description,modified,isDeleted,dataCurrentAsOf,compatibility,dataSchema,sessionDefinition,curatedComponents,type"
params = {"limit": limit}
nb_error = 0
nb_empty = 0
list_urls = []
if extended_info:
params['expansion'] = expansion_values
if filterIds is not None:
params['filterByIds'] = filterIds
if idContains is not None:
params['idContains'] = idContains
if segmentIds is not None:
params['segmentIds'] = segmentIds
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites"
if self.loggingEnabled:
self.logger.debug(f"params: {params}")
vrsid = self.connector.getData(
path, params=params, headers=self.header)
content = vrsid['content']
if not extended_info:
list_content = [{'name': item['name'], 'vrsid': item['id']}
for item in content]
df_vrsids = pd.DataFrame(list_content)
else:
df_vrsids = pd.DataFrame(content)
total_page = vrsid['totalPages']
last_page = vrsid['lastPage']
if not last_page: # if last_page =False
callsToMake = total_page
list_params = [{**params, 'page': page}
for page in range(1, callsToMake)]
list_urls = [path for x in range(1, callsToMake)]
listheaders = [self.header for x in range(1, callsToMake)]
workers = min(10, total_page)
with futures.ThreadPoolExecutor(workers) as executor:
res = executor.map(lambda x, y, z: self.connector.getData(
x, y, headers=z), list_urls, list_params, listheaders)
res = list(res)
list_data = [val for sublist in [r['content']
for r in res if 'content' in r.keys()] for val in sublist]
nb_error = sum(1 for elem in res if 'error_code' in elem.keys())
nb_empty = sum(1 for elem in res if 'content' in elem.keys() and len(
elem['content']) == 0)
if not extended_info:
list_append = [{'name': item['name'], 'vrsid': item['id']}
for item in list_data]
df_append = pd.DataFrame(list_append)
else:
df_append = pd.DataFrame(list_data)
df_vrsids = df_vrsids.append(df_append, ignore_index=True)
if save:
df_vrsids.to_csv('VRSIDS.csv', sep='\t')
if nb_error > 0 or nb_empty > 0:
message = f'WARNING : Retrieved data are partial.\n{nb_error}/{len(list_urls) + 1} requests returned an error.\n{nb_empty}/{len(list_urls)} requests returned an empty response. \nTry to use filter to retrieve reportSuite or increase limit per request'
print(message)
if self.loggingEnabled:
self.logger.warning(message)
return df_vrsids
def getVirtualReportSuite(self, vrsid: str = None, extended_info: bool = False,
format: str = 'df') -> JsonOrDataFrameType:
"""
return a single virtual report suite ID information as dataframe.
Arguments:
vrsid : REQUIRED : The virtual reportSuite to be retrieved
extended_info : OPTIONAL : boolean to add more information
format : OPTIONAL : format of the output. 2 values "df" for dataframe and "raw" for raw json.
"""
if vrsid is None:
raise Exception("require a Virtual ReportSuite ID")
if self.loggingEnabled:
self.logger.debug(f"Starting getVirtualReportSuite for {vrsid}")
expansion_values = "globalCompanyKey,parentRsid,parentRsidName,timezone,timezoneZoneinfo,currentTimezoneOffset,segmentList,description,modified,isDeleted,dataCurrentAsOf,compatibility,dataSchema,sessionDefinition,curatedComponents,type"
params = {}
if extended_info:
params['expansion'] = expansion_values
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites/{vrsid}"
data = self.connector.getData(path, params=params, headers=self.header)
if format == "df":
data = pd.DataFrame({vrsid: data})
return data
def getVirtualReportSuiteComponents(self, vrsid: str = None, nan_value=""):
"""
Uses the getVirtualReportSuite function to get a VRS and returns
the VRS components for a VRS as a dataframe. VRS must have Component Curation enabled.
Arguments:
vrsid : REQUIRED : Virtual Report Suite ID
nan_value : OPTIONAL : how to handle empty cells, default = ""
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getVirtualReportSuiteComponents")
vrs_data = self.getVirtualReportSuite(extended_info=True, vrsid=vrsid)
if "curatedComponents" not in vrs_data.index:
return pd.DataFrame()
components_cell = vrs_data[vrs_data.index ==
"curatedComponents"].iloc[0, 0]
return pd.DataFrame(components_cell).fillna(value=nan_value)
def createVirtualReportSuite(self, name: str = None, parentRsid: str = None, segmentList: list = None,
dataSchema: str = "Cache", data_dict: dict = None, **kwargs) -> dict:
"""
Create a new virtual report suite based on the information provided.
Arguments:
name : REQUIRED : name of the virtual reportSuite
parentRsid : REQUIRED : Parent reportSuite ID for the VRS
            segmentList : REQUIRED : list of segment ids to be applied on the ReportSuite.
dataSchema : REQUIRED : Type of schema used for the VRSID. (default "Cache")
data_dict : OPTIONAL : you can pass directly the dictionary.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting createVirtualReportSuite")
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites"
expansion_values = "globalCompanyKey,parentRsid,parentRsidName,timezone,timezoneZoneinfo,currentTimezoneOffset,segmentList,description,modified,isDeleted,dataCurrentAsOf,compatibility,dataSchema,sessionDefinition,curatedComponents,type"
params = {'expansion': expansion_values}
if data_dict is None:
body = {
"name": name,
"parentRsid": parentRsid,
"segmentList": segmentList,
"dataSchema": dataSchema,
"description": kwargs.get('description', '')
}
else:
if 'name' not in data_dict.keys() or 'parentRsid' not in data_dict.keys() or 'segmentList' not in data_dict.keys() or 'dataSchema' not in data_dict.keys():
if self.loggingEnabled:
self.logger.error(f"Missing one or more fundamental keys : name, parentRsid, segmentList, dataSchema")
raise Exception("Missing one or more fundamental keys : name, parentRsid, segmentList, dataSchema")
body = data_dict
res = self.connector.postData(
path, params=params, data=body, headers=self.header)
return res
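    # Payload sketch for createVirtualReportSuite (all ids below are placeholders):
    # ana.createVirtualReportSuite(
    #     name="My Virtual Report Suite",
    #     parentRsid="mycompany.globalrsid",
    #     segmentList=["s1234_5f0c000000000000000000aa"],
    #     dataSchema="Cache",
    #     description="VRS scoped to one segment"
    # )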
def updateVirtualReportSuite(self, vrsid: str = None, data_dict: dict = None, **kwargs) -> dict:
"""
Updates a Virtual Report Suite based on a JSON-like dictionary (same structure as createVirtualReportSuite)
Note that to update components, you need to supply ALL components currently associated with this suite.
Supplying only the components you want to change will remove all others from the VR Suite!
Arguments:
vrsid : REQUIRED : The id of the virtual report suite to update
data_dict : a json-like dictionary of the vrs data to update
"""
if vrsid is None:
raise Exception("require a virtual reportSuite ID")
if self.loggingEnabled:
self.logger.debug(f"Starting updateVirtualReportSuite for {vrsid}")
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites/{vrsid}"
body = data_dict
res = self.connector.putData(path, data=body, headers=self.header)
if self.loggingEnabled:
self.logger.debug(f"updateVirtualReportSuite response : {res}")
return res
def deleteVirtualReportSuite(self, vrsid: str = None) -> str:
"""
Delete a Virtual Report Suite based on the id passed.
Arguments:
vrsid : REQUIRED : The id of the virtual reportSuite to delete.
"""
if vrsid is None:
raise Exception("require a Virtual ReportSuite ID")
if self.loggingEnabled:
self.logger.debug(f"Starting deleteVirtualReportSuite for {vrsid}")
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites/{vrsid}"
res = self.connector.deleteData(path, headers=self.header)
if self.loggingEnabled:
self.logger.debug(f"deleteVirtualReportSuite {vrsid} response : {res}")
return res
def validateVirtualReportSuite(self, name: str = None, parentRsid: str = None, segmentList: list = None,
dataSchema: str = "Cache", data_dict: dict = None, **kwargs) -> dict:
"""
Validate the object to create a new virtual report suite based on the information provided.
Arguments:
name : REQUIRED : name of the virtual reportSuite
parentRsid : REQUIRED : Parent reportSuite ID for the VRS
            segmentList : REQUIRED : list of segment ids to be applied on the ReportSuite.
dataSchema : REQUIRED : Type of schema used for the VRSID (default : Cache).
data_dict : OPTIONAL : you can pass directly the dictionary.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting validateVirtualReportSuite")
path = f"{self.endpoint_company}/reportsuites/virtualreportsuites/validate"
        expansion_values = "globalCompanyKey,parentRsid,parentRsidName,timezone,timezoneZoneinfo,currentTimezoneOffset,segmentList,description,modified,isDeleted,dataCurrentAsOf,compatibility,dataSchema,sessionDefinition,curatedComponents,type"
if data_dict is None:
body = {
"name": name,
"parentRsid": parentRsid,
"segmentList": segmentList,
"dataSchema": dataSchema,
"description": kwargs.get('description', '')
}
else:
if 'name' not in data_dict.keys() or 'parentRsid' not in data_dict.keys() or 'segmentList' not in data_dict.keys() or 'dataSchema' not in data_dict.keys():
raise Exception(
"Missing one or more fundamental keys : name, parentRsid, segmentList, dataSchema")
body = data_dict
res = self.connector.postData(path, data=body, headers=self.header)
if self.loggingEnabled:
self.logger.debug(f"validateVirtualReportSuite response : {res}")
return res
def getDimensions(self, rsid: str, tags: bool = False, description:bool=False, save=False, **kwargs) -> pd.DataFrame:
"""
        Retrieve the list of dimensions from a specific reportSuite. Shrink columns to simplify output.
Returns the data frame of available dimensions.
Arguments:
rsid : REQUIRED : Report Suite ID from which you want the dimensions
tags : OPTIONAL : If you would like to have additional information, such as tags. (bool : default False)
description : OPTIONAL : Trying to add the description column. It may break the method.
save : OPTIONAL : If set to True, it will save the info in a csv file (bool : default False)
Possible kwargs:
full : Boolean : Doesn't shrink the number of columns if set to true
example : getDimensions(rsid,full=True)
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getDimensions")
params = {}
if tags:
params.update({'expansion': 'tags'})
params.update({'rsid': rsid})
dims = self.connector.getData(self.endpoint_company +
self._getDimensions, params=params, headers=self.header)
df_dims = pd.DataFrame(dims)
columns = ['id', 'name', 'category', 'type',
'parent', 'pathable']
if description:
columns.append('description')
if kwargs.get('full', False):
new_cols = pd.DataFrame(df_dims.support.values.tolist(),
columns=['support_oberon', 'support_dw']) # extract list in column
new_df = df_dims.merge(new_cols, right_index=True, left_index=True)
new_df.drop(['reportable', 'support'], axis=1, inplace=True)
df_dims = new_df
else:
df_dims = df_dims[columns]
if save:
df_dims.to_csv(f'dimensions_{rsid}.csv')
return df_dims
def getMetrics(self, rsid: str, tags: bool = False, save=False, description:bool=False, dataGroup:bool=False, **kwargs) -> pd.DataFrame:
"""
Retrieve the list of metrics from a specific reportSuite. Shrink columns to simplify output.
Returns the data frame of available metrics.
Arguments:
rsid : REQUIRED : Report Suite ID from which you want the dimensions (str)
tags : OPTIONAL : If you would like to have additional information, such as tags.(bool : default False)
dataGroup : OPTIONAL : Adding dataGroups to the column exported. Default False.
May break the report.
save : OPTIONAL : If set to True, it will save the info in a csv file (bool : default False)
Possible kwargs:
full : Boolean : Doesn't shrink the number of columns if set to true.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getMetrics")
params = {}
if tags:
params.update({'expansion': 'tags'})
params.update({'rsid': rsid})
metrics = self.connector.getData(self.endpoint_company +
self._getMetrics, params=params, headers=self.header)
df_metrics = pd.DataFrame(metrics)
columns = ['id', 'name', 'category', 'type',
'precision', 'segmentable']
if dataGroup:
columns.append('dataGroup')
if description:
columns.append('description')
if kwargs.get('full', False):
new_cols = pd.DataFrame(df_metrics.support.values.tolist(), columns=[
'support_oberon', 'support_dw'])
new_df = df_metrics.merge(
new_cols, right_index=True, left_index=True)
new_df.drop('support', axis=1, inplace=True)
df_metrics = new_df
else:
df_metrics = df_metrics[columns]
if save:
df_metrics.to_csv(f'metrics_{rsid}.csv', sep='\t')
return df_metrics
def getUsers(self, save: bool = False, **kwargs) -> pd.DataFrame:
"""
        Retrieve the list of users for a login company. Returns a data frame.
Arguments:
save : OPTIONAL : Save the data in a file (bool : default False).
Possible kwargs:
            limit : Number of results per request. Default 100.
expansion : string list such as "lastAccess,createDate"
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getUsers")
list_urls = []
nb_error, nb_empty = 0, 0 # use for multi-thread loop
params = {'limit': kwargs.get('limit', 100)}
if kwargs.get("expansion", None) is not None:
params["expansion"] = kwargs.get("expansion", None)
users = self.connector.getData(self.endpoint_company +
self._getUsers, params=params, headers=self.header)
data = users['content']
lastPage = users['lastPage']
        if not lastPage: # request the remaining pages when the first response is not the last one
callsToMake = users['totalPages']
list_params = [{'limit': params['limit'], 'page': page}
for page in range(1, callsToMake)]
list_urls = [self.endpoint_company +
self._getUsers for x in range(1, callsToMake)]
listheaders = [self.header
for x in range(1, callsToMake)]
workers = min(10, len(list_params))
with futures.ThreadPoolExecutor(workers) as executor:
res = executor.map(lambda x, y, z: self.connector.getData(x, y, headers=z), list_urls,
list_params, listheaders)
res = list(res)
users_lists = [elem['content']
for elem in res if 'content' in elem.keys()]
nb_error = sum(1 for elem in res if 'error_code' in elem.keys())
nb_empty = sum(1 for elem in res if 'content' in elem.keys()
and len(elem['content']) == 0)
append_data = [val for sublist in [data for data in users_lists]
for val in sublist] # flatten list of list
data = data + append_data
df_users = pd.DataFrame(data)
columns = ['email', 'login', 'fullName', 'firstName', 'lastName', 'admin', 'loginId', 'imsUserId', 'login',
'createDate', 'lastAccess', 'title', 'disabled', 'phoneNumber', 'companyid']
df_users = df_users[columns]
df_users['createDate'] = pd.to_datetime(df_users['createDate'])
df_users['lastAccess'] = pd.to_datetime(df_users['lastAccess'])
if save:
df_users.to_csv(f'users_{int(time.time())}.csv', sep='\t')
if nb_error > 0 or nb_empty > 0:
print(
f'WARNING : Retrieved data are partial.\n{nb_error}/{len(list_urls) + 1} requests returned an error.\n{nb_empty}/{len(list_urls)} requests returned an empty response. \nTry to use filter to retrieve users or increase limit')
return df_users
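    # Usage sketch for getUsers ("ana" is assumed to be an Analytics instance):
    # users = ana.getUsers(expansion="lastAccess,createDate")
    # users is a dataframe with one row per user of the login company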
def getSegments(self, name: str = None, tagNames: str = None, inclType: str = 'all', rsids_list: list = None,
sidFilter: list = None, extended_info: bool = False, format: str = "df", save: bool = False,
verbose: bool = False, **kwargs) -> JsonListOrDataFrameType:
"""
Retrieve the list of segments. Returns a data frame.
Arguments:
name : OPTIONAL : Filter to only include segments that contains the name (str)
tagNames : OPTIONAL : Filter list to only include segments that contains one of the tags (string delimited with comma, can be list as well)
inclType : OPTIONAL : type of segments to be retrieved.(str) Possible values:
- all : Default value (all segments possibles)
- shared : shared segments
- template : template segments
- deleted : deleted segments
- internal : internal segments
- curatedItem : curated segments
rsid_list : OPTIONAL : Filter list to only include segments tied to specified RSID list (list)
sidFilter : OPTIONAL : Filter list to only include segments in the specified list (list)
extended_info : OPTIONAL : additional segment metadata fields to include on response (bool : default False)
if set to true, returns reportSuiteName, ownerFullName, modified, tags, compatibility, definition
format : OPTIONAL : defined the format returned by the query. (Default df)
possibe values :
"df" : default value that return a dataframe
"raw": return a list of value. More or less what is return from server.
save : OPTIONAL : If set to True, it will save the info in a csv file (bool : default False)
verbose : OPTIONAL : If set to True, print some information
Possible kwargs:
limit : number of segments retrieved by request. default 500: Limited to 1000 by the AnalyticsAPI.
NOTE : Segment Endpoint doesn't support multi-threading. Default to 500.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting getSegments")
limit = int(kwargs.get('limit', 500))
params = {'includeType': 'all', 'limit': limit}
if extended_info:
params.update(
{'expansion': 'reportSuiteName,ownerFullName,created,modified,tags,compatibility,definition,shares'})
if name is not None:
params.update({'name': str(name)})
if tagNames is not None:
if type(tagNames) == list:
tagNames = ','.join(tagNames)
params.update({'tagNames': tagNames})
if inclType != 'all':
params['includeType'] = inclType
if rsids_list is not None:
if type(rsids_list) == list:
rsids_list = ','.join(rsids_list)
params.update({'rsids': rsids_list})
if sidFilter is not None:
if type(sidFilter) == list:
sidFilter = ','.join(sidFilter)
params.update({'rsids': sidFilter})
data = []
lastPage = False
page_nb = 0
if verbose:
print("Starting requesting segments")
while not lastPage:
params['page'] = page_nb
segs = self.connector.getData(self.endpoint_company +
self._getSegments, params=params, headers=self.header)
data += segs['content']
lastPage = segs['lastPage']
page_nb += 1
if verbose and page_nb % 10 == 0:
print(f"request #{page_nb / 10}")
if format == "df":
segments = pd.DataFrame(data)
else:
segments = data
if save and format == "df":
segments.to_csv(f'segments_{int(time.time())}.csv', sep='\t')
if verbose:
print(
f'Saving data in file : {os.getcwd()}{os.sep}segments_{int(time.time())}.csv')
elif save and format == "raw":
            with open(f"segments_{int(time.time())}.json","w") as f:
f.write(json.dumps(segments,indent=4))
return segments
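    # Usage sketch for getSegments ("ana" is assumed to be an Analytics instance):
    # segs = ana.getSegments(extended_info=True, inclType="shared", format="df")
    # segs is a dataframe of shared segments including definition, owner and tags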
def getSegment(self, segment_id: str = None,full:bool=False, *args) -> dict:
"""
Get a specific segment from the ID. Returns the object of the segment.
Arguments:
segment_id : REQUIRED : the segment id to retrieve.
full : OPTIONAL : Add all possible options
Possible args:
- "reportSuiteName" : string : to retrieve reportSuite attached to the segment
- "ownerFullName" : string : to retrieve ownerFullName attached to the segment
- "modified" : string : to retrieve when segment was modified
- "tags" : string : to retrieve tags attached to the segment
- "compatibility" : string : to retrieve which tool is compatible
- "definition" : string : definition of the segment
- "publishingStatus" : string : status for the segment
- "definitionLastModified" : string : last definition of the segment
- "categories" : string : categories of the segment
"""
        ValidArgs = ["reportSuiteName", "ownerFullName", "modified", "tags", "compatibility",
                     "definition", "publishingStatus", "definitionLastModified", "categories"]
if segment_id is None:
raise Exception("Expected a segment id")
if self.loggingEnabled:
self.logger.debug(f"Starting getSegment for {segment_id}")
path = f"/segments/{segment_id}"
        args = [element for element in args if element in ValidArgs] # keep only supported expansion values
params = {'expansion': ','.join(args)}
if full:
params = {'expansion': ','.join(ValidArgs)}
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
return res
def scanSegment(self,segment:Union[str,dict],verbose:bool=False)->dict:
"""
Return the dimensions, metrics and reportSuite used and the main scope of the segment.
Arguments:
segment : REQUIRED : either the ID of the segment or the full definition.
verbose : OPTIONAL : print some comment.
"""
if self.loggingEnabled:
self.logger.debug(f"Starting scanSegment")
if type(segment) == str:
if verbose:
print('retrieving segment definition')
defSegment = self.getSegment(segment,full=True)
elif type(segment) == dict:
defSegment = deepcopy(segment)
if 'definition' not in defSegment.keys():
raise KeyError('missing "definition" key ')
if verbose:
print('copied segment definition')
mydef = str(defSegment['definition'])
dimensions : list = re.findall("'(variables/.+?)'",mydef)
metrics : list = re.findall("'(metrics/.+?)'",mydef)
reportSuite = defSegment['rsid']
scope = re.search("'context': '(.+)'}[^'context']+",mydef)
res = {
'dimensions' : set(dimensions) if len(dimensions)>0 else {},
'metrics' : set(metrics) if len(metrics)>0 else {},
'rsid' : reportSuite,
'scope' : scope.group(1)
}
return res
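    # Usage sketch for scanSegment (the segment id below is a placeholder):
    # breakdown = ana.scanSegment("s1234_5f0c000000000000000000aa")
    # breakdown -> {'dimensions': {...}, 'metrics': {...}, 'rsid': '...', 'scope': 'visits'}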
def createSegment(self, segmentJSON: dict = None) -> dict:
"""
Method that creates a new segment based on the dictionary passed to it.
Arguments:
segmentJSON : REQUIRED : the dictionary that represents the JSON statement for the segment.
More information at this address <https://adobedocs.github.io/analytics-2.0-apis/#/segments/segments_createSegment>
"""
if self.loggingEnabled:
self.logger.debug(f"starting createSegment")
if segmentJSON is None:
print('No segment data has been pushed')
return None
data = deepcopy(segmentJSON)
seg = self.connector.postData(
self.endpoint_company + self._getSegments,
data=data,
headers=self.header
)
return seg
def createSegmentValidate(self, segmentJSON: dict = None) -> object:
"""
Method that validate a new segment based on the dictionary passed to it.
Arguments:
segmentJSON : REQUIRED : the dictionary that represents the JSON statement for the segment.
More information at this address <https://adobedocs.github.io/analytics-2.0-apis/#/segments/segments_createSegment>
"""
if self.loggingEnabled:
self.logger.debug(f"starting createSegmentValidate")
if segmentJSON is None:
print('No segment data has been pushed')
return None
data = deepcopy(segmentJSON)
path = "/segments/validate"
seg = self.connector.postData(self.endpoint_company +path,data=data)
return seg
def updateSegment(self, segmentID: str = None, segmentJSON: dict = None) -> object:
"""
Method that updates a specific segment based on the dictionary passed to it.
Arguments:
segmentID : REQUIRED : Segment ID to be updated
segmentJSON : REQUIRED : the dictionary that represents the JSON statement for the segment.
"""
if self.loggingEnabled:
self.logger.debug(f"starting updateSegment")
if segmentJSON is None or segmentID is None:
print('No segment or segmentID data has been pushed')
if self.loggingEnabled:
self.logger.error(f"No segment or segmentID data has been pushed")
return None
data = deepcopy(segmentJSON)
seg = self.connector.putData(
self.endpoint_company + self._getSegments + '/' + segmentID,
data=data,
headers=self.header
)
return seg
def deleteSegment(self, segmentID: str = None) -> object:
"""
        Method that deletes a specific segment based on the ID passed to it.
Arguments:
segmentID : REQUIRED : Segment ID to be deleted
"""
if segmentID is None:
print('No segmentID data has been pushed')
return None
if self.loggingEnabled:
self.logger.debug(f"starting deleteSegment for {segmentID}")
seg = self.connector.deleteData(self.endpoint_company +
self._getSegments + '/' + segmentID, headers=self.header)
return seg
def getCalculatedMetrics(
self,
name: str = None,
tagNames: str = None,
inclType: str = 'all',
rsids_list: list = None,
extended_info: bool = False,
save=False,
format:str='df',
**kwargs
) -> pd.DataFrame:
"""
Retrieve the list of calculated metrics. Returns a data frame.
Arguments:
name : OPTIONAL : Filter to only include calculated metrics that contains the name (str)
tagNames : OPTIONAL : Filter list to only include calculated metrics that contains one of the tags (string delimited with comma, can be list as well)
inclType : OPTIONAL : type of calculated Metrics to be retrieved. (str) Possible values:
- all : Default value (all calculated metrics possibles)
- shared : shared calculated metrics
- template : template calculated metrics
            rsid_list : OPTIONAL : Filter list to only include calculated metrics tied to the specified RSID list (list)
            extended_info : OPTIONAL : additional calculated metric metadata fields to include on the response (list)
additional infos: reportSuiteName,definition, ownerFullName, modified, tags, compatibility
save : OPTIONAL : If set to True, it will save the info in a csv file (Default False)
format : OPTIONAL : format of the output. 2 values "df" for dataframe and "raw" for raw json.
Possible kwargs:
limit : number of segments retrieved by request. default 500: Limited to 1000 by the AnalyticsAPI.(int)
"""
if self.loggingEnabled:
self.logger.debug(f"starting getCalculatedMetrics")
limit = int(kwargs.get('limit', 500))
params = {'includeType': inclType, 'limit': limit}
if name is not None:
params.update({'name': str(name)})
if tagNames is not None:
if type(tagNames) == list:
tagNames = ','.join(tagNames)
params.update({'tagNames': tagNames})
if inclType != 'all':
params['includeType'] = inclType
if rsids_list is not None:
if type(rsids_list) == list:
rsids_list = ','.join(rsids_list)
params.update({'rsids': rsids_list})
if extended_info:
params.update(
{'expansion': 'reportSuiteName,definition,ownerFullName,modified,tags,categories,compatibility,shares'})
metrics = self.connector.getData(self.endpoint_company +
self._getCalcMetrics, params=params)
data = metrics['content']
lastPage = metrics['lastPage']
        if not lastPage: # request the remaining pages when the first response is not the last one
page_nb = 0
while not lastPage:
page_nb += 1
params['page'] = page_nb
metrics = self.connector.getData(self.endpoint_company +
self._getCalcMetrics, params=params, headers=self.header)
data += metrics['content']
lastPage = metrics['lastPage']
if format == "raw":
if save:
with open(f'calculated_metrics_{int(time.time())}.json','w') as f:
f.write(json.dumps(data,indent=4))
return data
df_calc_metrics = pd.DataFrame(data)
if save:
df_calc_metrics.to_csv(f'calculated_metrics_{int(time.time())}.csv', sep='\t')
return df_calc_metrics
def getCalculatedMetric(self,calculatedMetricId:str=None,full:bool=True)->dict:
"""
Return a dictionary on the calculated metrics requested.
Arguments:
calculatedMetricId : REQUIRED : The calculated metric ID to be retrieved.
            full : OPTIONAL : additional calculated metric metadata fields to include on the response (list)
additional infos: reportSuiteName,definition, ownerFullName, modified, tags, compatibility
"""
if calculatedMetricId is None:
raise ValueError("Require a calculated metrics ID")
if self.loggingEnabled:
self.logger.debug(f"starting getCalculatedMetric for {calculatedMetricId}")
params = {}
if full:
params.update({'expansion': 'reportSuiteName,definition,ownerFullName,modified,tags,categories,compatibility'})
path = f"/calculatedmetrics/{calculatedMetricId}"
res = self.connector.getData(self.endpoint_company+path,params=params)
return res
def scanCalculatedMetric(self,calculatedMetric:Union[str,dict],verbose:bool=False)->dict:
"""
Return a dictionary of metrics and dimensions used in the calculated metrics.
"""
if self.loggingEnabled:
self.logger.debug(f"starting scanCalculatedMetric")
if type(calculatedMetric) == str:
if verbose:
print('retrieving calculated metrics definition')
cm = self.getCalculatedMetric(calculatedMetric,full=True)
elif type(calculatedMetric) == dict:
cm = deepcopy(calculatedMetric)
if 'definition' not in cm.keys():
raise KeyError('missing "definition" key')
if verbose:
print('copied calculated metrics definition')
mydef = str(cm['definition'])
segments:list = cm['compatibility'].get('segments',[])
res = {"dimensions":[],'metrics':[]}
for segment in segments:
if verbose:
print(f"retrieving segment {segment} definition")
tmp:dict = self.scanSegment(segment)
res['dimensions'] += [dim for dim in tmp['dimensions']]
res['metrics'] += [met for met in tmp['metrics']]
metrics : list = re.findall("'(metrics/.+?)'",mydef)
res['metrics'] += metrics
res['rsid'] = cm['rsid']
res['metrics'] = set(res['metrics']) if len(res['metrics'])>0 else {}
res['dimensions'] = set(res['dimensions']) if len(res['dimensions'])>0 else {}
return res
def createCalculatedMetric(self, metricJSON: dict = None) -> dict:
"""
Method that create a specific calculated metric based on the dictionary passed to it.
Arguments:
metricJSON : REQUIRED : Calculated Metrics information to create. (Required: name, definition, rsid)
More information can be found at this address https://adobedocs.github.io/analytics-2.0-apis/#/calculatedmetrics/calculatedmetrics_createCalculatedMetric
"""
if self.loggingEnabled:
self.logger.debug(f"starting createCalculatedMetric")
if metricJSON is None or type(metricJSON) != dict:
if self.loggingEnabled:
self.logger.error(f'Expected a dictionary to create the calculated metrics')
raise Exception(
"Expected a dictionary to create the calculated metrics")
if 'name' not in metricJSON.keys() or 'definition' not in metricJSON.keys() or 'rsid' not in metricJSON.keys():
if self.loggingEnabled:
self.logger.error(f'Expected "name", "definition" and "rsid" in the data')
raise KeyError(
'Expected "name", "definition" and "rsid" in the data')
cm = self.connector.postData(self.endpoint_company +
self._getCalcMetrics, headers=self.header, data=metricJSON)
return cm
def createCalculatedMetricValidate(self,metricJSON: dict=None)->dict:
"""
Method that validate a specific calculated metrics definition based on the dictionary passed to it.
Arguments:
metricJSON : REQUIRED : Calculated Metrics information to create. (Required: name, definition, rsid)
More information can be found at this address https://adobedocs.github.io/analytics-2.0-apis/#/calculatedmetrics/calculatedmetrics_createCalculatedMetric
"""
if self.loggingEnabled:
self.logger.debug(f"starting createCalculatedMetricValidate")
if metricJSON is None or type(metricJSON) != dict:
raise Exception(
"Expected a dictionary to create the calculated metrics")
if 'name' not in metricJSON.keys() or 'definition' not in metricJSON.keys() or 'rsid' not in metricJSON.keys():
if self.loggingEnabled:
self.logger.error(f'Expected "name", "definition" and "rsid" in the data')
raise KeyError(
'Expected "name", "definition" and "rsid" in the data')
path = "/calculatedmetrics/validate"
cm = self.connector.postData(self.endpoint_company+path, data=metricJSON)
return cm
def updateCalculatedMetric(self, calcID: str = None, calcJSON: dict = None) -> object:
"""
Method that updates a specific Calculated Metrics based on the dictionary passed to it.
Arguments:
calcID : REQUIRED : Calculated Metric ID to be updated
calcJSON : REQUIRED : the dictionary that represents the JSON statement for the calculated metric.
"""
if calcJSON is None or calcID is None:
print('No calcMetric or calcMetric JSON data has been passed')
return None
if self.loggingEnabled:
self.logger.debug(f"starting updateCalculatedMetric for {calcID}")
data = deepcopy(calcJSON)
cm = self.connector.putData(
self.endpoint_company + self._getCalcMetrics + '/' + calcID,
data=data,
headers=self.header
)
return cm
def deleteCalculatedMetric(self, calcID: str = None) -> object:
"""
        Method that deletes a specific calculated metric based on the id passed.
Arguments:
calcID : REQUIRED : Calculated Metrics ID to be deleted
"""
if calcID is None:
print('No calculated metrics data has been passed')
return None
if self.loggingEnabled:
self.logger.debug(f"starting deleteCalculatedMetric for {calcID}")
cm = self.connector.deleteData(
self.endpoint_company + self._getCalcMetrics + '/' + calcID,
headers=self.header
)
return cm
def getDateRanges(self, extended_info: bool = False, save: bool = False, includeType: str = 'all',verbose:bool=False,
**kwargs) -> pd.DataFrame:
"""
Get the list of date ranges available for the user.
Arguments:
            extended_info : OPTIONAL : additional date range metadata fields to include on the response
additional infos: reportSuiteName, ownerFullName, modified, tags, compatibility, definition
save : OPTIONAL : If set to True, it will save the info in a csv file (Default False)
includeType : Include additional date ranges not owned by user. The "all" option takes precedence over "shared"
Possible values are all, shared, templates. You can add all of them as comma separated string.
Possible kwargs:
limit : number of segments retrieved by request. default 500: Limited to 1000 by the AnalyticsAPI.
full : Boolean : Doesn't shrink the number of columns if set to true
"""
if self.loggingEnabled:
self.logger.debug(f"starting getDateRanges")
limit = int(kwargs.get('limit', 500))
includeType = includeType.split(',')
params = {'limit': limit, 'includeType': includeType}
if extended_info:
params.update(
{'expansion': 'definition,ownerFullName,modified,tags'})
dateRanges = self.connector.getData(
self.endpoint_company + self._getDateRanges,
params=params,
headers=self.header,
verbose=verbose
)
data = dateRanges['content']
df_dates = pd.DataFrame(data)
if save:
df_dates.to_csv('date_range.csv', index=False)
return df_dates
def updateDateRange(self, dateRangeID: str = None, dateRangeJSON: dict = None) -> object:
"""
Method that updates a specific Date Range based on the dictionary passed to it.
Arguments:
dateRangeID : REQUIRED : Calculated Metric ID to be updated
dateRangeJSON : REQUIRED : the dictionary that represents the JSON statement for the calculated metric.
"""
if self.loggingEnabled:
self.logger.debug(f"starting updateDateRange")
if dateRangeJSON is None or dateRangeID is None:
print('No calcMetric or calcMetric JSON data has been pushed')
return None
data = deepcopy(dateRangeJSON)
dr = self.connector.putData(
self.endpoint_company + self._getDateRanges + '/' + dateRangeID,
data=data,
headers=self.header
)
return dr
def deleteDateRange(self, dateRangeID: str = None) -> object:
"""
Method that deletes a specific date Range based on the id passed.
Arguments:
dateRangeID : REQUIRED : ID of Date Range to be deleted
"""
if dateRangeID is None:
print('No Date Range ID has been pushed')
return None
if self.loggingEnabled:
self.logger.debug(f"starting deleteDateRange for {dateRangeID}")
response = self.connector.deleteData(
self.endpoint_company + self._getDateRanges + '/' + dateRangeID,
headers=self.header
)
return response
def getCalculatedFunctions(self, **kwargs) -> pd.DataFrame:
"""
Returns the calculated metrics functions.
"""
if self.loggingEnabled:
self.logger.debug(f"starting getCalculatedFunctions")
path = "/calculatedmetrics/functions"
limit = int(kwargs.get('limit', 500))
params = {'limit': limit}
funcs = self.connector.getData(
self.endpoint_company + path,
params=params,
headers=self.header
)
df = pd.DataFrame(funcs)
return df
def getTags(self, limit: int = 100, **kwargs) -> list:
"""
Return the list of tags
Arguments:
limit : OPTIONAL : Amount of tag to be returned by request. Default 100
"""
if self.loggingEnabled:
self.logger.debug(f"starting getTags")
path = "/componentmetadata/tags"
params = {'limit': limit}
if kwargs.get('page', False):
params['page'] = kwargs.get('page', 0)
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
data = res['content']
if not res['lastPage']:
page = res['number'] + 1
data += self.getTags(limit=limit, page=page)
return data
def getTag(self, tagId: str = None) -> dict:
"""
        Return a tag by its ID.
Arguments:
tagId : REQUIRED : the Tag ID to be retrieved.
"""
if tagId is None:
raise Exception("Require a tag ID for this method.")
if self.loggingEnabled:
self.logger.debug(f"starting getTag for {tagId}")
path = f"/componentmetadata/tags/{tagId}"
res = self.connector.getData(self.endpoint_company + path, headers=self.header)
return res
def getComponentTagName(self, tagNames: str = None, componentType: str = None) -> dict:
"""
Given a comma separated list of tag names, return component ids associated with them.
Arguments:
tagNames : REQUIRED : Comma separated list of tag names.
componentType : REQUIRED : The component type to operate on.
Available values : segment, dashboard, bookmark, calculatedMetric, project, dateRange, metric, dimension, virtualReportSuite, scheduledJob, alert, classificationSet
"""
path = "/componentmetadata/tags/tagnames"
if tagNames is None:
raise Exception("Requires tag names to be provided")
if self.loggingEnabled:
self.logger.debug(f"starting getComponentTagName for {tagNames}")
if componentType is None:
raise Exception("Requires a Component Type to be provided")
params = {
"tagNames": tagNames,
"componentType": componentType
}
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
return res
def searchComponentsTags(self, componentType: str = None, componentIds: list = None) -> dict:
"""
Search for the tags of a list of component by their ids.
Arguments:
componentType : REQUIRED : The component type to use in the search.
Available values : segment, dashboard, bookmark, calculatedMetric, project, dateRange, metric, dimension, virtualReportSuite, scheduledJob, alert, classificationSet
componentIds : REQUIRED : List of components Ids to use.
"""
if self.loggingEnabled:
self.logger.debug(f"starting searchComponentsTags")
if componentType is None:
raise Exception("ComponentType is required")
if componentIds is None or type(componentIds) != list:
raise Exception("componentIds is required as a list of ids")
path = "/componentmetadata/tags/component/search"
obj = {
"componentType": componentType,
"componentIds": componentIds
}
if self.loggingEnabled:
self.logger.debug(f"params {obj}")
res = self.connector.postData(self.endpoint_company + path, data=obj, headers=self.header)
return res
def createTags(self, data: list = None) -> dict:
"""
Create a new tag and applies that new tag to the passed components.
Arguments:
data : REQUIRED : list of the tag to be created with their component relation.
Example of data :
[
{
"id": 0,
"name": "string",
"description": "string",
"components": [
{
"componentType": "string",
"componentId": "string",
"tags": [
"Unknown Type: Tag"
]
}
]
}
]
"""
if self.loggingEnabled:
self.logger.debug(f"starting createTags")
if data is None:
raise Exception("Requires a list of tags to be created")
path = "/componentmetadata/tags"
if self.loggingEnabled:
self.logger.debug(f"data: {data}")
res = self.connector.postData(self.endpoint_company + path, data=data, headers=self.header)
return res
def deleteTags(self, componentType: str = None, componentIds: str = None) -> str:
"""
Delete all tags from the component Type and the component ids specified.
Arguments:
componentIds : REQUIRED : the Comma-separated list of componentIds to operate on.
componentType : REQUIRED : The component type to operate on.
Available values : segment, dashboard, bookmark, calculatedMetric, project, dateRange, metric, dimension, virtualReportSuite, scheduledJob, alert, classificationSet
"""
if self.loggingEnabled:
self.logger.debug(f"starting deleteTags")
if componentType is None:
raise Exception("require a component type")
if componentIds is None:
raise Exception("require component ID(s)")
path = "/componentmetadata/tags"
params = {
"componentType": componentType,
"componentIds": componentIds
}
res = self.connector.deleteData(self.endpoint_company + path, params=params, headers=self.header)
return res
def deleteTag(self, tagId: str = None) -> str:
"""
Delete a Tag based on its id.
Arguments:
tagId : REQUIRED : The tag ID to be deleted.
"""
if tagId is None:
raise Exception("A tag ID is required")
if self.loggingEnabled:
self.logger.debug(f"starting deleteTag for {tagId}")
path = "/componentmetadata/tags/{tagId}"
res = self.connector.deleteData(self.endpoint_company + path, headers=self.header)
return res
def getComponentTags(self, componentId: str = None, componentType: str = None) -> list:
"""
Given a componentId, return all tags associated with that component.
Arguments:
componentId : REQUIRED : The componentId to operate on. Currently this is just the segmentId.
componentType : REQUIRED : The component type to operate on.
segment, dashboard, bookmark, calculatedMetric, project, dateRange, metric, dimension, virtualReportSuite, scheduledJob, alert, classificationSet
"""
if self.loggingEnabled:
self.logger.debug(f"starting getComponentTags")
path = "/componentmetadata/tags/search"
if componentType is None:
raise Exception("require a component type")
if componentId is None:
raise Exception("require a component ID")
params = {"componentId": componentId, "componentType": componentType}
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
return res
def updateComponentTags(self, data: list = None):
"""
Overwrite the component tags with the list sent.
Arguments:
data : REQUIRED : list of the components to be updated with their respective list of tag names.
Object looks like the following:
[
{
"componentType": "string",
"componentId": "string",
"tags": [
"Unknown Type: Tag"
]
}
]
"""
if self.loggingEnabled:
self.logger.debug(f"starting updateComponentTags")
if data is None or type(data) != list:
raise Exception("require list of update to be sent.")
path = "/componentmetadata/tags/tagitems"
res = self.connector.putData(self.endpoint_company + path, data=data, headers=self.header)
return res
def getScheduledJobs(self, includeType: str = "all", full: bool = True,limit:int=1000,format:str="df",verbose: bool = False) -> JsonListOrDataFrameType:
"""
Get Scheduled Projects. You can retrieve the projectID out of the tasks column to see which Workspace project a schedule belongs to.
Arguments:
includeType : OPTIONAL : By default gets all non-expired or deleted projects. (default "all")
You can specify e.g. "all,shared,expired,deleted" to get more.
Active schedules always get exported, so you need to use the `rsLocalExpirationTime` parameter in the `schedule` column to e.g. see which schedules are expired
full : OPTIONAL : By default True. It returns the following additional information "ownerFullName,groups,tags,sharesFullName,modified,favorite,approved,scheduledItemName,scheduledUsersFullNames,deletedReason"
limit : OPTIONAL : Number of element retrieved by request (default max 1000)
format : OPTIONAL : Define the format you want to output the result. Default "df" for dataframe, other option "raw"
verbose: OPTIONAL : set to True for debug output
"""
if self.loggingEnabled:
self.logger.debug(f"starting getScheduledJobs")
params = {"includeType": includeType,
"pagination": True,
"locale": "en_US",
"page": 0,
"limit": limit
}
if full is True:
params["expansion"] = "ownerFullName,groups,tags,sharesFullName,modified,favorite,approved,scheduledItemName,scheduledUsersFullNames,deletedReason"
path = "/scheduler/scheduler/scheduledjobs/"
if verbose:
print(f"Getting Scheduled Jobs with Parameters {params}")
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
if res.get("content") is None:
raise Exception(f"Scheduled Job had no content in response. Parameters were: {params}")
# get Scheduled Jobs data into Data Frame
data = res.get("content")
last_page = res.get("lastPage",True)
total_el = res.get("totalElements")
number_el = res.get("numberOfElements")
if verbose:
print(f"Last Page {last_page}, total elements: {total_el}, number_el: {number_el}")
# iterate through pages if not on last page yet
while last_page == False:
if verbose:
print(f"last_page is {last_page}, next round")
params["page"] += 1
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
data += res.get("content")
last_page = res.get("lastPage",True)
if format == "df":
df = pd.DataFrame(data)
return df
return data
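# Hedged example of consuming the pagination above; `analytics` is an assumed authenticated
# instance, and the expansion fields are those listed in the docstring.
#   jobs_df = analytics.getScheduledJobs(includeType="all", full=True, format="df")
#   print(jobs_df.shape)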
def getProjects(self, includeType: str = 'all', full: bool = False, limit: int = None, includeShared: bool = False,
includeTemplate: bool = False, format: str = 'df', cache:bool=False, save: bool = False) -> JsonListOrDataFrameType:
"""
Returns the list of projects through either a dataframe or a list.
Arguments:
includeType : OPTIONAL : type of projects to be retrieved.(str) Possible values:
- all : Default value (all projects possibles)
- shared : shared projects
full : OPTIONAL : if set to True, returns all information about projects.
limit : OPTIONAL : Limit the number of result returned.
includeShared : OPTIONAL : If full is set to False, you can retrieve only information about sharing.
includeTemplate: OPTIONAL : If full is set to False, you can add information about template here.
format : OPTIONAL : format of the output. 2 values "df" for dataframe (default) and "raw" for raw json.
cache : OPTIONAL : Boolean in case you want to cache the result in the "listProjectIds" attribute.
save : OPTIONAL : If set to True, it will save the info in a csv file (bool : default False)
"""
if self.loggingEnabled:
self.logger.debug(f"starting getProjects")
path = "/projects"
params = {"includeType": includeType}
if full:
params[
"expansion"] = 'reportSuiteName,ownerFullName,tags,shares,sharesFullName,modified,favorite,approved,companyTemplate,externalReferences,accessLevel'
else:
params["expansion"] = "ownerFullName,modified"
if includeShared:
params["expansion"] += ',shares,sharesFullName'
if includeTemplate:
params["expansion"] += ',companyTemplate'
if limit is not None:
params['limit'] = limit
if self.loggingEnabled:
self.logger.debug(f"params: {params}")
res = self.connector.getData(self.endpoint_company + path, params=params, headers=self.header)
if cache:
self.listProjectIds = res
if format == "raw":
if save:
with open('projects.json', 'w') as f:
f.write(json.dumps(res, indent=2))
return res
df = pd.DataFrame(res)
if df.empty == False:
df['created'] = | pd.to_datetime(df['created'], format='%Y-%m-%dT%H:%M:%SZ') | pandas.to_datetime |
# <NAME>
# <EMAIL>
import numpy as np
import pandas as pd
from flam2millijansky.flam2millijansky import flam2millijansky
from hstphot.container import Container
def prepare_KN_nebular_spc(wavelength_angstrom,luminosity_per_angstrom,luminosity_distance_mpc,container):
"""
prepare_KN_nebular_spc function prepares a spectrum file to be in a format recognizable by JWST ETC.
#####
Required:
- pip install flam2millijansky, hstphot
- basic packages in python (e.g., numpy and pandas)
#####
+ Inputs:
- wavelength_angstrom = 1D array of wavelengths in Angstrom, sorted ascending.
- luminosity_per_angstrom = 1D array of luminosity in erg/s/A, parallel to wavelengths.
- luminosity_distance_mpc = a scalar for luminosity distance in Mpc unit.
- container = Container class for specifying the output paths. (See hstphot.container.Container; pip install hstphot).
#####
+ Outputs:
- return a dict with {'micron':values,'mjy':values}
- save to a file defined by container:
> filename: ./{0}/{1}_KN_{2}Mpc.dat where 0 = container.data['savefolder'], 1 = container.data['saveprefix'], and 2 = int(luminosity_distance_mpc).
> Column1 = micron
> Column2 = mjy
> sep = ' '
"""
wavelength_micron = wavelength_angstrom * 1e-4
luminosity_distance_cm = luminosity_distance_mpc * 1e6 * 3.086e18
flam = luminosity_per_angstrom / (4. * np.pi * np.power(luminosity_distance_cm,2))
mjy = flam2millijansky(wavelength_angstrom,flam)
m = np.argwhere(wavelength_micron > 0.).flatten()
out = {'micron':wavelength_micron[m],'mjy':mjy[m]}
out = | pd.DataFrame(out) | pandas.DataFrame |
# License: Apache-2.0
from gators.encoders.woe_encoder import WOEEncoder
from pandas.testing import assert_frame_equal
import pytest
import numpy as np
import pandas as pd
import databricks.koalas as ks
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture
def data():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': [0.0, 0.0, 0.0, 1.38629436, 1.38629436, 1.38629436],
'B': [0.0, 0.0, 0.69314718, 0.69314718, 0.69314718, 0.69314718],
'C': [-0.40546511, -0.40546511, -0.40546511, -0.40546511, 0.69314718, 0.69314718],
'D': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
obj = WOEEncoder().fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_float32():
X = pd.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': [0.0, 0.0, 0.0, 1.38629436, 1.38629436, 1.38629436],
'B': [0.0, 0.0, 0.69314718, 0.69314718, 0.69314718, 0.69314718],
'C': [-0.40546511, -0.40546511, -0.40546511, -0.40546511, 0.69314718, 0.69314718],
'D': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}).astype(np.float32)
obj = WOEEncoder(dtype=np.float32).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_no_cat():
X = pd.DataFrame(
np.zeros((6, 3)),
columns=list('ABC'),
)
y = pd.Series([0, 0, 0, 1, 1, 0], name='TARGET')
obj = WOEEncoder().fit(X, y)
return obj, X, X.copy()
@pytest.fixture
def data_ks():
X = ks.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': [0.0, 0.0, 0.0, 1.38629436, 1.38629436, 1.38629436],
'B': [0.0, 0.0, 0.69314718, 0.69314718, 0.69314718, 0.69314718],
'C': [-0.40546511, -0.40546511, -0.40546511, -0.40546511, 0.69314718, 0.69314718],
'D': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]})
obj = WOEEncoder().fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_float32_ks():
X = ks.DataFrame({
'A': ['Q', 'Q', 'Q', 'W', 'W', 'W'],
'B': ['Q', 'Q', 'W', 'W', 'W', 'W'],
'C': ['Q', 'Q', 'Q', 'Q', 'W', 'W'],
'D': [1, 2, 3, 4, 5, 6]})
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
X_expected = pd.DataFrame({
'A': [0.0, 0.0, 0.0, 1.38629436, 1.38629436, 1.38629436],
'B': [0.0, 0.0, 0.69314718, 0.69314718, 0.69314718, 0.69314718],
'C': [-0.40546511, -0.40546511, -0.40546511, -0.40546511, 0.69314718, 0.69314718],
'D': [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]}).astype(np.float32)
obj = WOEEncoder(dtype=np.float32).fit(X, y)
return obj, X, X_expected
@pytest.fixture
def data_no_cat_ks():
X = ks.DataFrame(
np.zeros((6, 3)),
columns=list('ABC'),
)
y = ks.Series([0, 0, 0, 1, 1, 0], name='TARGET')
obj = WOEEncoder().fit(X, y)
return obj, X, X.to_pandas().copy()
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
def test_float32_pd(data_float32):
obj, X, X_expected = data_float32
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_ks(data_float32_ks):
obj, X, X_expected = data_float32_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_float32_pd_np(data_float32):
obj, X, X_expected = data_float32
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_ks_np(data_float32_ks):
obj, X, X_expected = data_float32_ks
X_numpy = X.to_numpy()
X_numpy_new = obj.transform_numpy(X_numpy)
X_new = pd.DataFrame(X_numpy_new, columns=X_expected.columns)
assert_frame_equal(X_new, X_expected)
def test_no_cat_pd(data_no_cat):
obj, X, X_expected = data_no_cat
X_new = obj.transform(X)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created in September 2020
@author: karliskanders
Functions and classes for generating and analysing career transition recommendations
"""
import pandas as pd
import numpy as np
import pickle
from time import time
import yaml
import os
from ast import literal_eval
from sklearn.preprocessing import normalize
from scipy.spatial.distance import cdist, cosine
from scipy.stats import wilcoxon
from collections import defaultdict
import mapping_career_causeways
import mapping_career_causeways.compare_nodes_utils as compare_nodes_utils
import mapping_career_causeways.load_data_utils as load_data
from mapping_career_causeways.scripts import pickle_large_files
find_closest = compare_nodes_utils.find_closest
useful_paths = mapping_career_causeways.Paths()
data = load_data.Data()
sim = load_data.Similarities()
# Import default skills description embeddings
embeddings = np.load(f'{useful_paths.data_dir}interim/embeddings/embeddings_skills_description_SBERT.npy')
### SET UP DEFAULT TRANSITION FILTERING CRITERIA ###
with open(f'{useful_paths.codebase_dir}configs/default_transition_params.yaml', 'r') as f:
def_transition_params = yaml.load(f, Loader=yaml.FullLoader)
# Viability: Similarity threshold for viable transitions (default = 0.3)
MIN_VIABLE_DEF = def_transition_params['MIN_VIABLE']
# Viability: Similarity threshold for highly viable transitions (default = 0.4)
HIGHLY_VIABLE_DEF = def_transition_params['HIGHLY_VIABLE']
# Viability: Max absolute difference in job zones (default = 1)
MAX_JOB_ZONE_DIF_DEF = def_transition_params['MAX_JOB_ZONE_DIF']
# Desirability: Threshold for differences in earnings (default = 0.75)
MIN_EARNINGS_RATIO_DEF = def_transition_params['MIN_EARNINGS_RATIO']
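# For reference, a default_transition_params.yaml consistent with the defaults noted above
# would presumably look like the following (an assumption, not the shipped file):
#   MIN_VIABLE: 0.3
#   HIGHLY_VIABLE: 0.4
#   MAX_JOB_ZONE_DIF: 1
#   MIN_EARNINGS_RATIO: 0.75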
def occupations_to_check(id_to_check):
"""
Helper function for selecting a list of occupations
Parameters
----------
id_to_check (list of int, or str or None):
List of integers corresponding to occupation IDs, or a string for a shorthand
reference to a predefined set of occupations.
"""
if (type(id_to_check)==type(None)) or (id_to_check=='report'):
id_to_check = data.report_occ_ids
elif id_to_check == 'top':
id_to_check = data.top_occ_ids
elif id_to_check == 'all':
id_to_check = data.occ.id.to_list()
return id_to_check
def find_most_similar(
occ = None,
similarity_measure='combined',
n=15,
destination_ids='report',
transpose=False):
"""
Helper function for finding the most similar occupations that a worker in
the specified occupation could transition to.
Parameters
----------
occ (int or str):
Either the occupation ID (int) or preferred label (str)
similarity_measure (str):
One of the following: 'combined', 'essential_skills', 'optional_skills',
'work_activities', 'work_context'
n (int):
Number of the top-most similar occupations to return
destination_ids (list of int, or str):
List of admissible destination occupations, specified by a list occupation IDs or
a string for a shorthand reference to a predefined set of occupations
transpose (boolean):
If True, it will transpose the similarity matrix and the results will
show the most similar occupations that could transition into the specified occupation
(NB: The skills and combined similarity matrices are asymmetric)
Returns
-------
df (pandas.DataFrame):
A dataframe with the following fields: 'id', 'preferred_label' and 'similarity'
"""
occ_id = data.occ_title_to_id(occ)
destination_ids = occupations_to_check(destination_ids)
sim_matrix = sim.select_similarity_matrix(similarity_measure)
if transpose:
sim_matrix = sim_matrix.T
df = find_closest(occ_id, sim_matrix, data.occ[['id', 'preferred_label']])
df = df[df.id.isin(destination_ids)].iloc[0:n]
return df
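# A hedged usage sketch; the occupation label is illustrative and not taken from the data:
#   df_similar = find_most_similar(occ='data scientist', similarity_measure='combined', n=10)
#   print(df_similar[['id', 'preferred_label', 'similarity']])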
def get_transitions(
origin_ids = None,
MIN_VIABLE = MIN_VIABLE_DEF,
HIGHLY_VIABLE = HIGHLY_VIABLE_DEF,
MAX_JOB_ZONE_DIF = MAX_JOB_ZONE_DIF_DEF,
MIN_EARNINGS_RATIO = MIN_EARNINGS_RATIO_DEF,
destination_ids = None,
verbose=False, less_information=False):
"""
Function to find viable, desirable and safe transitions according to the specified filters;
NB: This function outputs only transitions whose occupation similarity is above MIN_VIABLE threshold
Parameters
----------
origin_ids (list of int):
List of origin occupation IDs, for which to check the transitions. If None,
we only check the subset of occupations analysed in the report
MIN_VIABLE (float):
Similarity threshold for viable transitions (default = 0.3)
HIGHLY_VIABLE (float):
Similarity threshold for highly viable transitions (default = 0.4)
MAX_JOB_ZONE_DIF (int):
Max absolute difference in job zones (default = 1)
MIN_EARNINGS_RATIO (float):
Threshold for differences in earnings (default = 0.75)
destination_ids (list of int):
List of permissible destination occupation IDs. If None, we check only
the subset of occupations analysed in the report
Returns
-------
trans_df (pandas.DataFrame):
A pandas dataframe with transitions and various descriptors and indicators.
See https://github.com/nestauk/mapping-career-causeways/tree/main/supplementary_online_data/transitions/transitions_tables/
for descriptions for each of the columns.
"""
columns = initialise_transition_table_columns()
origin_ids = occupations_to_check(origin_ids)
destination_ids = occupations_to_check(destination_ids)
# For each occupation in consideration...
if verbose: print('Finding all transitions...', end=' ')
t_now = time()
for j, j_id in enumerate(origin_ids):
# Find the most similar occupations
df = find_closest(j_id, sim.W_combined, data.occ[['id']])
# Filter out self
df = df[df.id!=j_id]
# Filter out occupations that we're not supposed to check
df = df[df.id.isin(destination_ids)]
# Filter out non-viable transitions
df = df[df.similarity > MIN_VIABLE]
# Viable IDs
viable_ids = df.id.to_list()
# Collect data about each transition from j_id to viable_ids
columns = transition_data_processing(
columns, j_id, viable_ids,
MIN_VIABLE,
HIGHLY_VIABLE,
MAX_JOB_ZONE_DIF,
MIN_EARNINGS_RATIO)
if verbose: print(f'Done!\nThis took {(time()-t_now):.2f} seconds.')
trans_df = | pd.DataFrame(data=columns) | pandas.DataFrame |
# Needed libraries
import pandas as pd
from pandas import json_normalize
from coinsta.exceptions import BadSnapshotURL, WrongCoinCode, ApiKeyError
from coinsta.utils import _readable_date, _ticker_checker, _snapshot_readable_date, _parse_cmc_url
from datetime import date, datetime
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
from urllib.error import HTTPError
# Historical Class for all methods related to historical data
class Historical:
"""
A class that provides methods for scraping historical price data
based on specified crypto-currencies and time period from
CoinMarketCap.
"""
def __init__(self, ticker, start, end=None):
"""
This method initialises the Historical object based on the
ticker, starting period, and ending period as specified by
users.
:param ticker: str object representing ticker information.
:param start: a Datetime date object representing YYYYMMDD.
:param end: a Datetime date object representing YYYYMMDD.
"""
# Check for mis-specification of dates
if isinstance(start, date) is False:
raise TypeError("Start argument must be a date object or strings with the alternative 'from_strings' "
"constructor")
# Convert the start date into the appropriate format for scraping data from CoinMarketCap
start = start.isoformat().replace("-", "")
# Use today's date unless otherwise specified by user
if end is None:
today = date.today()
formatted_today = today.isoformat().replace("-", "")
end = formatted_today
elif isinstance(end, date) is False:
raise TypeError("End argument must be a date object or \
strings with the alternative 'from_strings' constructor")
else:
end = end.isoformat().replace("-", "")
# Self assign default args
self.ticker = ticker
self.start = start
self.end = end
def __repr__(self):
return "<Historical({0}, {1}, {2})>".format(self.ticker, self.start, self.end)
def __str__(self):
return "Coinsta object: \n crypto_symbol: {0} \n start_period: {1} \n" \
" end_period: {2}".format(self.ticker,
_readable_date(self.start),
_readable_date(self.end)
)
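# A hedged usage sketch; the ticker and dates are illustrative only:
#   from datetime import date
#   hist = Historical('btc', start=date(2019, 1, 1), end=date(2019, 6, 30))
#   df = hist.get_data()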
def get_data(self):
"""
This function scrapes and cleans the data of the specified tickers
from CoinMarketCap website.
:return: A Pandas DataFrame object containing historical data on the specified tickers.
"""
# Get the ticker id used by CoinMarketCap
slug = _ticker_checker(self.ticker)
# Custom data url based on the user specified ticker and starting period and ending period
site_url = "https://coinmarketcap.com/currencies/{0}/historical-data/?start={1}&end={2}".format(slug,
self.start,
self.end)
# Download the data based on the custom data url
try:
data = pd.read_html(site_url)
except HTTPError:
raise WrongCoinCode("'{0}' is unavailable on CoinMarketCap.com. "
"Please check the website for the right ticker name".format(slug))
df = data[-2]
# Clean up the DataFrame
df['Date'] = | pd.to_datetime(df['Date']) | pandas.to_datetime |
#! /usr/bin/python3
# Developer: <NAME>
# -*- coding: utf-8 -*-
import os
import subprocess
import pandas as pd
import openpyxl as xl
from pathlib import Path
import pyodbc
# Available functions
# Correct column's name by position
def correct(col_, head_):
for flag_, value_ in head_.items():
# match column position
if col_ == value_:
return flag_
# Search in a dictionary for the value's key
def get_key(val_, dict_):
for key_, value_ in dict_.items():
# match the values and then get the key
if str(val_) == str(value_):
return key_
# Find the index of a value in a list
def find_indeks(val_, list_):
indeks_ = list_.index(val_)
return indeks_
# Calculate the size of the excel spreadsheet
def xls_size(wb_):
n_cols_ = 0
max_n_cols_ = 0
# Create dictionary and count column duplicates
# {cellValue: #}
count_ = {}
# Get the active tab...
sheet_ = wb_.active
max_cols_ = sheet_.max_column
# Loop and use all cell values
rows_ = [[parse_xls_cells_in_row(cell_) for cell_ in row_] for row_ in sheet_.iter_rows()]
n_rows_ = len(rows_)
for col_ in range(1, max_cols_):
sc_ = sheet_.cell(row=1, column=col_).value
count_.setdefault(sc_, 1)
# Count empty cells
if sc_ is not None:
max_n_cols_ += 1
# Count cell value, if it repeats
elif sc_ in count_:
count_[sc_] = count_[sc_] + 1
# Count all cells
else:
pass
n_cols_ += 1
return max_n_cols_, n_cols_, n_rows_, count_
# Select a matching empty global variable to assign its value
def select(d_):
k_list_ = []
# match existing empty variable with the hard-coded dictionary
list_of_globals_ = globals()
for k_ in list_of_globals_:
if k_.startswith(d_):
k_list_.append([k_, list_of_globals_[k_]])
return k_list_
# Parse excel cells from a given row
def parse_xls_cells_in_row(cell_):
cell_value_ = cell_.value
# use only cells that are not empty
if cell_value_ is not None:
return cell_value_ or ''
# Parse an Excel file
def parse_file(file_):
# Get the name
if file_[-5:] == ".xlsx":
f_ = os.path.basename(file_[:-5])
try:
# Open the Excel file
wb_ = xl.load_workbook(file_)
# Get all tabs in the excel sheet...
# sheet_ = wb_.active
if len(wb_.sheetnames) > 1:
for w_name in wb_.sheetnames:
sheet_ = wb_[w_name]
# Parse the data in the tab
rows_ = [[parse_xls_cells_in_row(cell_) for cell_ in row_] for row_ in sheet_.iter_rows()]
# Proceed only if the file is not empty
if rows_ is not None:
# Trim subsequent empty rows
last_row_index_ = max(idx_ for idx_, row_ in enumerate(rows_) if any(val_ for val_ in row_))
rows_ = rows_[:last_row_index_ + 1]
# Debug info
# print("Row:", rows_)
# All rows must have the same number of columns
last_col_index_ = max(max(idx_ for idx_, val_ in enumerate(row_) if val_) for row_ in rows_)
padding_ = [''] * last_col_index_
# Assign the resulting list to a variable
rows_ = [(row_ + padding_)[:last_col_index_ + 1] for row_ in rows_]
return rows_, f_, wb_.sheetnames
else:
rows_ = 0
return rows_, f_, wb_.sheetnames
else:
sheet_ = wb_.active
_sheet_ = str(sheet_)[12:-2]
# Parse the data in the tab
rows_ = [[parse_xls_cells_in_row(cell_) for cell_ in row_] for row_ in sheet_.iter_rows()]
# Trim subsequent empty rows
last_row_index_ = max(i_ for i_, row_ in enumerate(rows_) if any(val_ for val_ in row_))
rows_ = rows_[:last_row_index_ + 1]
# Debug info
# print("Row:", rows_)
# All rows must have the same number of columns
last_col_index_ = max(max(i_ for i_, val_ in enumerate(row_) if val_) for row_ in rows_)
padding_ = [''] * last_col_index_
# Assign the resulting list to a variable
rows_ = [(row_ + padding_)[:last_col_index_ + 1] for row_ in rows_]
return rows_, f_, _sheet_
# Take care of possible exceptions
except FileNotFoundError:
print("No file found")
except IsADirectoryError:
print("The input is not a file, but a directory")
elif file_[:-4] == ".xls":
f_ = os.path.basename(file_[:-4])
try:
# Open the Excel file
wb_ = xl.load_workbook(file_)
# Fetch the active tab ...
sheet_ = wb_.active
# Parse the data in the tab
rows_ = [[parse_xls_cells_in_row(cell_) for cell_ in row_] for row_ in sheet_.iter_rows()]
# Trim subsequent empty rows
last_row_index_ = max(i_ for i_, row_ in enumerate(rows_) if any(val_ for val_ in row_))
rows_ = rows_[:last_row_index_ + 1]
# All rows must have the same number of columns
last_col_index_ = max(max(i_ for i_, val_ in enumerate(row_) if val_) for row_ in rows_)
padding_ = [''] * last_col_index_
# Assign the resulting list to a variable
rows_ = [(row_ + padding_)[:last_col_index_ + 1] for row_ in rows_]
return rows_, f_, sheet_
# Take care of possible exceptions
except FileNotFoundError:
print("No file found")
except IsADirectoryError:
print("The input is not a file, but a directory")
# Take care of other possible file types
else:
raise ValueError("Unexpected file type: {}".format(file_))
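# Hedged usage sketch of the return contract (the path is illustrative):
#   rows, name, sheets = parse_file('/path/to/work/_data/example.xlsx')
#   print(name, sheets, len(rows))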
# Choose the latest version of each file
def current_version(wd_):
# {file: #}
count_ = {}
# {file: filePath}
file_list_ = {}
# {file#: full filePath}
file_list_paths_ = {}
# Loop through the files found in subdirectories ...
for dir_path_, dirs_, files_ in os.walk(wd_):
for file_ in files_:
# ... select only files with excel extensions ...
if file_.endswith('.xlsx') or file_.endswith('.xls'):
fpath_ = os.path.join(dir_path_, file_)
# ... write down the files and count their duplicates
count_.setdefault(file_, 0)
count_[file_] = count_[file_] + 1
dict_paths_ = {str(file_ + str(count_[file_])): str(fpath_)}
file_list_paths_.update(dict_paths_)
# Loop through.xlsx files and their amount of duplicates
for k_, v_ in count_.items():
# Write down the latest version in the dictionary, if duplicates of such file were found.
if int(v_) > 1:
# {time: filePath}
list_paths = {}
# Loop through the amount of duplicates ...
for f_ in range(1, v_):
path_ = Path(file_list_paths_.get(str(k_) + str(v_)))
time_ = path_.stat().st_mtime
paths_ = {time_: str(path_)}
list_paths.update(paths_)
# ... select the latest key, ...
key_ = max(list_paths)
f_ = os.path.basename(list_paths.get(key_))
file_dup_ = {f_: list_paths.get(key_)}
file_list_.update(file_dup_)
# ... otherwise add direct file names without duplicates
else:
path_ = file_list_paths_.get(str(k_) + str(v_))
f_ = os.path.basename(path_)
file_simp_ = {f_: str(path_)}
file_list_.update(file_simp_)
# ... select only files with .csv extension ...
elif file_.endswith('.csv'):
fpath_ = os.path.join(dir_path_, file_)
# ... write down the files and count their duplicates.
count_.setdefault(file_, 0)
count_[file_] = count_[file_] + 1
dict_paths_ = {str(file_ + str(count_[file_])): str(fpath_)}
file_list_paths_.update(dict_paths_)
# Loop through .csv files and number of duplicates.
for k_, v_ in count_.items():
# Write down the latest version in the dictionary, if duplicates of such file were found.
if int(v_) > 1:
# {time: filePath}
list_paths = {}
# Loop through the amount of duplicates ...
for f_ in range(1, v_):
path_ = Path(file_list_paths_.get(str(k_) + str(v_)))
time_ = path_.stat().st_mtime
paths_ = {time_: str(path_)}
list_paths.update(paths_)
# ... select the latest key, ...
key_ = max(list_paths)
f_ = os.path.basename(list_paths.get(key_))
file_dup_ = {f_: list_paths.get(key_)}
file_list_.update(file_dup_)
# ... otherwise add direct file names without duplicates
else:
path_ = file_list_paths_.get(str(k_) + str(v_))
f_ = os.path.basename(path_)
file_simp_ = {f_: str(path_)}
file_list_.update(file_simp_)
# ... select only files with mapinfo extensions ...
elif file_.endswith('.TAB') or file_.endswith('.tab'):
fpath_ = os.path.join(dir_path_, file_)
# ... write down the files and count their duplicates.
count_.setdefault(file_, 0)
count_[file_] = count_[file_] + 1
dict_paths_ = {str(file_ + str(count_[file_])): str(fpath_)}
file_list_paths_.update(dict_paths_)
# Loop through .tab files and number of duplicates.
for k_, v_ in count_.items():
# Write down the latest version in the dictionary, if duplicates of such file were found.
if int(v_) > 1:
# {time: filePath}
list_paths = {}
# Loop through the amount of duplicates ...
for f_ in range(1, v_):
path_ = Path(file_list_paths_.get(str(k_) + str(v_)))
time_ = path_.stat().st_mtime
paths_ = {time_: str(path_)}
list_paths.update(paths_)
# ... select the latest key, ...
key_ = max(list_paths)
f_ = os.path.basename(list_paths.get(key_))
file_dup_ = {f_: list_paths.get(key_)}
file_list_.update(file_dup_)
# ... otherwise add direct file names without duplicates
else:
path_ = file_list_paths_.get(str(k_) + str(v_))
f_ = os.path.basename(path_)
file_simp_ = {f_: str(path_)}
file_list_.update(file_simp_)
# ... select only files with ESRI shapefile extensions ...
elif file_.endswith('.shp') or file_.endswith('.SHP'):
fpath_ = os.path.join(dir_path_, file_)
# ... write down the files and count their duplicates.
count_.setdefault(file_, 0)
count_[file_] = count_[file_] + 1
dict_paths_ = {str(file_ + str(count_[file_])): str(fpath_)}
file_list_paths_.update(dict_paths_)
# Loop through .shp files and number of duplicates.
for k_, v_ in count_.items():
# Write down the latest version in the dictionary, if duplicates of such file were found.
if int(v_) > 1:
# {time: filePath}
list_paths = {}
# Loop through the amount of duplicates ...
for f_ in range(1, v_):
path_ = Path(file_list_paths_.get(str(k_) + str(v_)))
time_ = path_.stat().st_mtime
paths_ = {time_: str(path_)}
list_paths.update(paths_)
# ... select the latest key, ...
key_ = max(list_paths)
f_ = os.path.basename(list_paths.get(key_))
file_dup_ = {f_: list_paths.get(key_)}
file_list_.update(file_dup_)
# ... otherwise add direct file names without duplicates
else:
path_ = file_list_paths_.get(str(k_) + str(v_))
f_ = os.path.basename(path_)
file_simp_ = {f_: str(path_)}
file_list_.update(file_simp_)
# ... select only files with access database extensions ...
elif file_.endswith('.accdb'):
fpath_ = os.path.join(dir_path_, file_)
# ... write down the files and count their duplicates.
count_.setdefault(file_, 0)
count_[file_] = count_[file_] + 1
dict_paths_ = {str(file_ + str(count_[file_])): str(fpath_)}
file_list_paths_.update(dict_paths_)
# Loop through .accdb files and number of duplicates.
for k_, v_ in count_.items():
# Write down the latest version in the dictionary, if duplicates of such file were found.
if int(v_) > 1:
# {time: filePath}
list_paths = {}
# Loop through the amount of duplicates ...
for f_ in range(1, v_):
path_ = Path(file_list_paths_.get(str(k_) + str(v_)))
time_ = path_.stat().st_mtime
paths_ = {time_: str(path_)}
list_paths.update(paths_)
# ... select the latest key, ...
key_ = max(list_paths)
f_ = os.path.basename(list_paths.get(key_))
file_dup_ = {f_: list_paths.get(key_)}
file_list_.update(file_dup_)
# ... otherwise add direct file names without duplicates
else:
path_ = file_list_paths_.get(str(k_) + str(v_))
f_ = os.path.basename(path_)
file_simp_ = {f_: str(path_)}
file_list_.update(file_simp_)
return file_list_
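# Hedged usage sketch: current_version maps each supported file name to the path of its
# newest copy found under the given directory tree.
#   latest = current_version('/path/to/work/_data/_orgnl/0Samlet_data/')
#   for name_, path_ in latest.items():
#       print(name_, '->', path_)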
###################################################################################################
###################################################################################################
# Define global variables and create the needed dictionaries
out_path = r"/path/to/work/_data/_pyOutput/tmp/"
out_csv = r"/path/to/work/_data/_pyOutput/csv_out/"
in_path = r"/path/to/work/_data/_orgnl/0Samlet_data/"
file_search = ("Andexxxxxxxa.xlsx", "Angxxxxxxxxn.xlsx", "bxxxxxxxxxxxxi.shp",
"bxxxxxxxxxxxn.shp", "bxxxxxxxxxxxi.shp", "bxxxxxxxxxxxxxxxxxxn.shp",
"bxxxxxxxxxxxxxxxxx.shp", "bxxxxxxxxxxxxxxxn.shp", "baxxxxxxxxxxxxxxxxx.shp",
"bxxxxxxxxxxxxxxxxxn.shp", "Baxxxxxxxxxxxxb.xlsx", "Bxxxxxxxxxxxxxx.xlsx", "Dxxxxxxxxxxxn.xlsx",
"Dxxxxxxxxxxxxxx.xlsx", "Fxxxxxxxxxxxxxxxx.xlsx", "Fxxxxxxxx.xlsx", "Fxxxxxxxxxxxn.xlsx",
"Fxxxxxxxxxxxxxx.xlsx", "gxxxxxxxxxxx.accdb", "gxxxxxxxxxxxx.accdb", "Ixxxxxxxxxxxxxx.xlsx",
"Ixxxxxxxxxxxxxxxxx.xlsx", "Ixxxxxxxxxx.xlsx", "Kxxxxxxxxxxxxxx.xlsx", "Lxxxxxxxxxxxxxxx.xlsx",
"Mxxxxxxxxxxxxxxxn.xlsx", "Maxxxxxxxxxxxxx.xlsx", "Mxxxxxxxxxxxxxxxxxxxxxxxxxxd.xlsx",
"Mxxxxxxxxxxxxxxxxxxxxxxx.xlsx", "Mxxxxxxxxxxxe.xlsx", "Mxxxxxxxxxxxxxxxxxxxx.xlsx",
"Mxxxxxxxxxxxxxxxxxxxxxx.xlsx", "Mxxxxxxxxxxxxxxxxxxxx.xlsx", "Nxxxxxxxxxxxxxxxxxx.xlsx",
"Rxxxxxxxxxxxxxx.xlsx", "Rxxxxxxxxxxxxxxxxxxx.xlsx", "Sxxxxxxxxxxx.xlsx", "Uxxxxxxxxxxxxxxxxxxxxxx.xlsx",
"Uxxxxxxxxxxxx.xlsx", "Uxxxxxxxxxxxxx.xlsx", "Uxxxxxxxxxxxxxx.xlsx", "xxxxxxxxxxxxxxx.xlsx",
"vxxxxxxxxxxxxxxxxxxxxxxxxxxxx.shp", "vxxxxxxxxxxxxxxxxxxxxxxxxxx.xlsx",
"vxxxxxxxxxxxxxxxxxxxxxx.xlsx", "vxxxxxxxxxxxxxxxxxxxxx.shp", "Vxxxxxxxxxxxxxxxxx.csv")
# Create dynamic variable lists for the Access database tables
gwPollutant = []
gwPollutant_baggrundsdata = []
# Set the working directory
os.chdir(in_path)
cwd_ = os.getcwd()
print("The cwd is now:", cwd_, "\n")
# Create dynamic dictionaries for setting up data frames.
empty_column_list = {}
duplicate_column_list = {}
# {filePath: row}
dfs_dict = {}
# Create blank data.frame for data
data_df = []
# Create blank data.frame for metadata
info_df = []
# Create two blank lists for column ids, and for their data, in order to later create a data.frame with custom data type
index_df_name = []
index_df_data = []
# Call the functions
# Fetch data from the path and check for file versions
file_list = current_version(cwd_)
print(file_list)
# Convert .tab to .shp, and then to .csv and load as data.frame
n = 0
for fil in file_list:
if fil[-4:] == ".TAB" or fil[-4:] == ".tab":
# Warn about problems with ogr2ogr translations, when it comes to Mapinfo files
# There is no time to test, if there is trouble with other files, these seem to work fine.
print("----------------------------------------WARNING: ----------------------------------------")
print("At this point, Mapinfo files are poorly supported by the ogr translation drivers used here.")
print("The output file may have lost many data rows. Copy/paste manually from the Mapinfo table to.csv/.xlsx")
# Give a name to the output file.
print("Converting %s to shp" % fil)
shp_file = fil[:-4] + '.shp'
# Define the input file.
in_tab = file_list.get(str(fil))
# Define the output file.
out_shp = os.path.join(out_path, shp_file)
print("----", out_shp, "-------------------------------------------------", "\n")
# Convert Format (first step)
subprocess.check_output(['ogr2ogr', '-f', "ESRI Shapefile", str(out_shp), str(in_tab)])
csv_file = fil[:-4] + '.csv'
# Define the intermediate output file
out_csv_ = Path(out_csv, csv_file)
# Convert Format (final step)
subprocess.check_output(['ogr2ogr', '-f', "CSV", str(out_csv_), str(out_shp)])
tab_file = pd.read_csv(out_csv_, dtype=object)
# Check for any last irregular columns
if 'NaN' in tab_file.head():
to_drop = ['NaN']
tab_file.drop(to_drop, inplace=True, axis=1)
# Collect data.frames in series
data_df.append(tab_file)
# Make an overview of the location of data.frames, i.e. their different files
df_dict_ = {os.path.basename(fil): n}
dfs_dict.update(df_dict_)
n += 1
elif fil[-4:] == ".csv":
# Define the input file.
in_csv = file_list.get(str(fil))
# Read pandas data csv data.frame
csv_file = pd.read_csv(in_csv, sep=';', dtype=object)
# Check for any last irregular columns
if 'NaN' in csv_file.head():
to_drop = ['NaN']
csv_file.drop(to_drop, inplace=True, axis=1)
# Collect data.frames in series
data_df.append(csv_file)
# Make an overview of the location of data.frames, i.e. their different files
df_dict_ = {os.path.basename(fil): n}
dfs_dict.update(df_dict_)
n += 1
# Convert all shp to csv and load as data.frame
elif fil[-4:] == ".shp":
# Give a name to the output file.
print("Converting %s to csv" % fil)
csv_file = str(fil[:-4]) + ".csv"
# Define the input file.
in_shp = file_list.get(str(fil))
# Define the intermediate output file
out_csv_ = os.path.join(out_csv, csv_file)
print("----", out_csv_, "-------------------------------------------------", "\n")
# Format converter
subprocess.check_output(['ogr2ogr', '-f', "CSV", str(out_csv_), str(in_shp)])
shp_file = | pd.read_csv(out_csv_, dtype=object) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[47]:
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.express as px
from dash.dependencies import Input, Output
import numpy as np
import plotly.graph_objects as go
import dash_bootstrap_components as dbc
from dash.exceptions import PreventUpdate
import requests
import re
import pandas as pd
import numpy as np
from bs4 import BeautifulSoup
import json
import matplotlib.dates as mdates
from datetime import datetime
import matplotlib.pyplot as plt
import base64
# In[48]:
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.SOLAR])
server = app.server
##################
# Controls
##################
# This creates the gray column with the slider on the left
LEFT_COLUMN = dbc.Jumbotron(
[
html.H4(children="Data Togglers", className="display-5"),
html.Hr(className="my-2"),
html.Label("Select Date Range", className="lead"),
html.P(
"(Use the two drag bars to select the date range)",
style={"fontSize": 10, "font-weight": "lighter"},
),
html.Div(id='time_slider', style={"marginBottom": 80, "font-size": 12}),
# USE THIS TO CHANGE HOW LOW YOU WANT THE SIDE BAR TO GO
html.Label("Narrow by Indication and Phase of Development", className="lead"),
html.P(
"(Use the two drag bars to select the date range)",
style={"fontSize": 10, "font-weight": "lighter"},
),
dbc.Label("Select Indications"),
dcc.Dropdown(id='indication_selections',
placeholder='None',
multi=True),
dbc.Label("Select Phase"),
dcc.Dropdown(id='phase_selections',
placeholder='None',
multi=True),
dbc.Label("Select Sponsor Type"),
dcc.Dropdown(id='sponsor_type',
placeholder='None',
multi=True)
]
)
# This creates the graph
ClINICAL_TRIALS_DATA = [
dbc.CardHeader(html.H5("Clinical Trials Over This Time Period")),
dbc.CardBody(
[dbc.Alert(
"Not enough data to render this plot, please adjust the filters",
id="no-data",
color="warning",
style={"display": "none"},
),
html.Div(dcc.Graph(id='timeline', className="dash-bootstrap"),
style={"marginBottom": 80, "font-size": 12}),
],
)
]
ClINICAL_TRIALS_METRICS = [
dbc.CardHeader(html.H5("Study Characteristics")),
dbc.CardBody(
[
dcc.Loading(
id="loading-clin-data",
children=[
dbc.Alert(
"Not enough data to render this plot, please adjust the filters",
id="nont-enough-data",
color="warning",
style={"display": "none"},
),
dbc.CardGroup([
dbc.Card(dcc.Graph(id='phase-bars')),
dbc.Card(dcc.Graph(id='company-pie')),
dbc.Card(dcc.Graph(id='sponsor-graph')),
dbc.Card(dcc.Graph(id='indication-bar')),
]),
dbc.CardHeader(html.H5("Trial Averages")),
dbc.CardGroup([
dbc.Card(dcc.Graph(id='study-size')),
dbc.Card(dcc.Graph(id='study-duration'))])
],
style={"marginTop": 1, "marginBottom": 1, "width": 1},
),
]
)]
BODY = dbc.Container(
[dbc.Row([
dbc.Col(LEFT_COLUMN, md=2, align="top"), # Use the brackets to keep them in the same row
dbc.Col(dbc.Card(ClINICAL_TRIALS_DATA), md=10),
],
style={"marginTop": 20},
),
dbc.Row([
dbc.Col(ClINICAL_TRIALS_METRICS, md=12)],
className="mt-12", )
], fluid=True)
# In[49]:
####################
# App
####################
app.layout = html.Div([html.Div([
html.Label("Enter the year the product enters the market"),
dcc.Input(id='dz', type="text", debounce=True, placeholder='lupus'),
html.Label("Enter the earliest date"),
dcc.Input(id='earl', type="text",
debounce=True, placeholder='earliest date, yyyy-mm-dd: '),
html.Label("Enter the latest date"),
dcc.Input(id='late', type="text",
debounce=True, placeholder='latest date, yyyy-mm-dd: '),
html.Button('SEARCH', id='search'),
dcc.Store(id='trials'),
html.Div(id='studies'),
html.Div(children=[BODY])
])
])
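# Note (assumption): the layout keeps the API response in dcc.Store(id='trials') so every
# callback below re-hydrates it with pd.DataFrame(data). If no entry point is defined later
# in this notebook export, a typical one would be:
#   if __name__ == '__main__':
#       app.run_server(debug=True)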
# In[50]:
############
# Call backs to activate the clinicaltrials.gov api and store data
###########
@app.callback(
Output('trials', 'data'),
[Input('search', 'n_clicks'),
Input('dz', 'value'),
Input('earl', 'value'),
Input('late', 'value')]
)
def get_data(n_clicks, dz, earl, late):
if n_clicks is None:
raise PreventUpdate
else:
disease = dz
url = 'https://clinicaltrials.gov/api/query/study_fields?expr={}&fmt=JSON&type=Intr&max_rnk=999&fields=NCTId,Condition,BriefTitle,OrgFullName,LeadSponsorClass,StartDate,PrimaryCompletionDate,PrimaryOutcomeMeasure,InterventionDescription,Phase,InterventionName,InterventionType,DetailedDescription,EnrollmentCount,CentralContactName,CentralContactEMail'.format(
disease)
response = requests.get(url)
soup = BeautifulSoup(response.content, 'lxml')
elements = soup.find("p").text
data = json.loads(elements)
data_list = data['StudyFieldsResponse']['StudyFields']
clinicalgov_df = pd.DataFrame(data_list)
clinicalgov_df = clinicalgov_df.drop(columns='Rank')
clinicalgov_df = clinicalgov_df.apply(lambda x: x.str[0])
clinicalgov_df.sort_values(by=['Phase'], inplace=True, ascending=False)
clinicalgov_df['Phase'] = clinicalgov_df['Phase'].astype(
str) # for some reason they're floats, turning to strings
clinicalgov_df = clinicalgov_df[
~clinicalgov_df.Phase.str.contains('Phase 4')] # this is likely repurposing, or other stuff not interesting
# clinicalgov_df = clinicalgov_df[~clinicalgov_df.Phase.str.contains('Not Applicable')] #obviously
# clinicalgov_df = clinicalgov_df[~clinicalgov_df.Phase.str.contains('nan')]# obviously
# clinicalgov_df = clinicalgov_df[~clinicalgov_df.Phase.str.contains('Early Phase 1')] #too eary to be relevant
# clinicalgov_df = clinicalgov_df[clinicalgov_df.InterventionType.isin(['Drug', 'Biological'])] #Only keeps drugs, and biologics in the dataframe, drop all other intervention types
clinicalgov_df['ph_num'] = clinicalgov_df.Phase.str.extract(r'(\d+)')  # extract numeric part of the phase
clinicalgov_df['ph_num'] = clinicalgov_df['ph_num'].astype(float)
clinicalgov_df['name_phase'] = [' '.join(i) for i in
zip(clinicalgov_df['InterventionName'].map(str), clinicalgov_df['Phase'])]
# clinicalgov_df['name_phase'] = [' '.join(i) for i in zip(clinicsalgov_df['name_phase'].map(str), clinicalgov_df['OrgFullName'])]
earliest = earl
latest = late
clinicalgov_df['StartDate'] = pd.to_datetime(clinicalgov_df['StartDate'])
clinicalgov_df['PrimaryCompletionDate'] = pd.to_datetime(
clinicalgov_df['PrimaryCompletionDate']) # --converts dates to time stamp
clinicalgov_dff = clinicalgov_df[
(clinicalgov_df['PrimaryCompletionDate'] > earliest) & (clinicalgov_df['PrimaryCompletionDate'] < latest)]
clinicalgov_dff['level'] = np.tile([-60, 60, -50, 50, -40, 40, -30, 30, -10, 10, -5, 5, -1, 1],
int(np.ceil(len(clinicalgov_dff['PrimaryCompletionDate']) / 14)))[
:len(clinicalgov_dff['PrimaryCompletionDate'])]
# Reset the index of the clinicalgov_dff by date
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
return clinicalgov_dff.to_dict()
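# Hedged illustration of the request built above: for dz='lupus' the formatted URL starts
#   https://clinicaltrials.gov/api/query/study_fields?expr=lupus&fmt=JSON&type=Intr&max_rnk=999&fields=NCTId,...
# and the JSON payload is read from StudyFieldsResponse -> StudyFields.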
# In[51]:
####################################
# Callbacks - Display a study to show the data request worked
####################################
@app.callback(
Output("studies", "children"),
[Input("trials", "data")])
def update_output(data):
dff = pd.DataFrame(data)
return dff.iloc[2, 2]
# In[52]:
@app.callback(Output('indication_selections', 'options'),
Output('sponsor_type', 'options'),
Output('phase_selections', 'options'),
Input('trials', 'data'))
def create_left_col(data):
##########################################################
# THESE ARE THE FILTER VALUES USED LATER TO FILTER THE DATA
#########################################################
clinicalgov_dff = pd.DataFrame(data)
indications = clinicalgov_dff['Condition'].dropna().sort_values(ascending=False).unique()
indication_selections = [{'label': i, 'value': i} for i in indications]
sponsors = clinicalgov_dff['LeadSponsorClass'].unique()
sponsor_type = [{'label': i, 'value': i} for i in sponsors]
phases = clinicalgov_dff['Phase'].sort_values(ascending=False).unique()
phase_selections = [{'label': i, 'value': i} for i in phases]
return indication_selections, sponsor_type, phase_selections
# In[53]:
@app.callback(Output('time_slider', 'children'),
Input('trials', 'data'))
def timeline_data(data):
clinicalgov_dff = pd.DataFrame(data)
numdate = [x for x in range(len(clinicalgov_dff['PrimaryCompletionDate'].sort_values(ascending=True)))]
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True)
slider = dcc.RangeSlider(id='time-slider',
updatemode='drag',
allowCross=False,
min=numdate[0], # the first date
max=numdate[-1], # the last date
value=[5 - 5, 5],
)
return slider
# In[54]:
############################################################
# Callbacks - Clinical Trials Over This Time Period
############################################################
@app.callback(
Output('timeline', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value'),
Input('indication_selections', 'value'),
Input('phase_selections', 'value'),
Input('sponsor_type', 'value')])
def create_timeline(data, value, indication_selections, phase_selections, sponsor_type):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
# None of the 3 are selected
dff = clinicalgov_dff[(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1])]
# Right now there are 3 options, so 2 (is none or is not none) x 2 x 2 means there should be 8 options
# None of the 3 are selected
if indication_selections is None and phase_selections is None and sponsor_type is None:
dff = dff
# All 3 are selected
if indication_selections is not None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor Only
if indication_selections is None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Indication
if indication_selections is not None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Phase
if indication_selections is None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication and Phase
if indication_selections is not None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication Only
if indication_selections is not None and phase_selections is None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Phase only
if indication_selections is None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
fig = px.scatter(dff,
x='PrimaryCompletionDate',
y='level',
text='name_phase',
hover_data=['InterventionName', 'Phase', 'Condition', 'OrgFullName', 'NCTId'],
color=dff['Phase'],
color_discrete_map={'Phase 1': 'lightcyan', 'Phase 2': 'royalblue', 'Phase 3': 'darkblue'}
)
fig.update_traces(textposition='top center', textfont=dict(family="sans serif"), textfont_size=14)
fig.update_xaxes(showgrid=False)
fig.update_xaxes(zeroline=True, zerolinewidth=2, zerolinecolor='Black')
fig.update_yaxes(showgrid=False)
fig.update_yaxes(visible=False, showticklabels=False)
return fig
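# A hedged, more compact alternative to the 8-branch filtering above (a sketch, not the
# app's actual code); it applies each optional dropdown independently:
#   def apply_filters(df, indications=None, phases=None, sponsors=None):
#       if indications: df = df[df['Condition'].str.contains('|'.join(indications))]
#       if phases:      df = df[df['Phase'].str.contains('|'.join(phases))]
#       if sponsors:    df = df[df['LeadSponsorClass'].str.contains('|'.join(sponsors))]
#       return df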
# In[55]:
@app.callback(
Output('phase-bars', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value')])
def create_phase_bars(data, value):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
dff = clinicalgov_dff[(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1])]
phase_dict = dict({'Phase 1': int(len(dff[dff['Phase'].str.contains('1')])),
'Phase 2': int(len(dff[dff['Phase'].str.contains('2')])),
'Phase 3': int(len(dff[dff['Phase'].str.contains('3')]))})
phase_clinicalgov_df = pd.DataFrame.from_dict(phase_dict, orient='index', columns=['Count'])
fig = px.bar(phase_clinicalgov_df,
x=phase_clinicalgov_df.index,
y='Count',
color=phase_clinicalgov_df.index, color_discrete_map={'Phase 1': 'lightcyan',
'Phase 2': 'royalblue', 'Phase 3': 'darkblue'})
fig.update_layout(showlegend=False)
fig.update_xaxes(showgrid=False)
fig.update_yaxes(showgrid=False)
return fig
# In[56]:
@app.callback(
Output('company-pie', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value'),
Input('indication_selections', 'value'),
Input('phase_selections', 'value'),
Input('sponsor_type', 'value')])
def create_company_pie(data, value, indication_selections, phase_selections, sponsor_type):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
dff = clinicalgov_dff[(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1])]
# Right now there are 3 options, so 2 (is none or is not none) x 2 x 2 means there should be 8 options
# None of the 3 are selected
if indication_selections is None and phase_selections is None and sponsor_type is None:
dff = dff
# All 3 are selected
if indication_selections is not None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor Only
if indication_selections is None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Indication
if indication_selections is not None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Phase
if indication_selections is None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication and Phase
if indication_selections is not None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication Only
if indication_selections is not None and phase_selections is None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Phase only
if indication_selections is None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
company_counts = dff.groupby('OrgFullName')['NCTId'].nunique().reset_index()
company_counts = company_counts.rename(columns={'OrgFullName': 'Company', 'NCTId': 'Study Counts'})
company_counts = company_counts.sort_values(by='Study Counts', ascending=False)
fig = px.pie(company_counts,
values='Study Counts',
names='Company')
fig.update_layout(showlegend=False)
fig.update_traces(textposition='outside', textinfo='text + label ', hole=.4)
return fig
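# Editorial note (hedged refactoring sketch, not wired into the callbacks in this
# file): the eight-branch if-chain that re-applies the time-slider window plus the
# three optional dropdown filters is repeated verbatim in every callback. The helper
# below expresses the same filtering once; filtering on the frame's own index is
# equivalent to the original's clinicalgov_dff['PrimaryCompletionDate'].index.
def filter_trials(clinicalgov_dff, value, indication_selections=None,
                  phase_selections=None, sponsor_type=None):
    dff = clinicalgov_dff[(clinicalgov_dff.index >= value[0]) &
                          (clinicalgov_dff.index <= value[1])]
    if indication_selections is not None:
        dff = dff[dff['Condition'].str.contains('|'.join(indication_selections))]
    if phase_selections is not None:
        dff = dff[dff['Phase'].str.contains('|'.join(phase_selections))]
    if sponsor_type is not None:
        dff = dff[dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))]
    return dff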
# In[57]:
@app.callback(
Output('sponsor-graph', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value'),
Input('indication_selections', 'value'),
Input('phase_selections', 'value'),
Input('sponsor_type', 'value')])
def create_sponsor_pie(data, value, indication_selections, phase_selections, sponsor_type):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
dff = clinicalgov_dff[(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1])]
# Right now there are 3 options, so 2 (is none or is not none) x 2 x 2 means there should be 8 options
# None of the 3 are selected
if indication_selections is None and phase_selections is None and sponsor_type is None:
dff = dff
# All 3 are selected
if indication_selections is not None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor Only
if indication_selections is None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Indication
if indication_selections is not None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Phase
if indication_selections is None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication and Phase
if indication_selections is not None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication Only
if indication_selections is not None and phase_selections is None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Phase only
if indication_selections is None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
sponsor_counts = dff.groupby('LeadSponsorClass')['NCTId'].nunique().reset_index()
sponsor_counts = sponsor_counts.rename(columns={'LeadSponsorClass': 'Sponsor', 'NCTId': 'Study Counts'})
sponsor_counts = sponsor_counts.sort_values(by='Study Counts', ascending=False)
fig = px.pie(sponsor_counts,
values='Study Counts',
names='Sponsor')
fig.update_layout(showlegend=False)
fig.update_traces(textposition='outside', textinfo='text + label ', hole=.4)
return fig
# In[58]:
@app.callback(
Output('indication-bar', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value'),
Input('indication_selections', 'value'),
Input('phase_selections', 'value'),
Input('sponsor_type', 'value')])
def create_indication_bar(data, value, indication_selections, phase_selections, sponsor_type):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
dff = clinicalgov_dff[(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1])]
# Right now there are 3 options, so 2 (is none or is not none) x 2 x 2 means there should be 8 options
# None of the 3 are selected
if indication_selections is None and phase_selections is None and sponsor_type is None:
dff = dff
# All 3 are selected
if indication_selections is not None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor Only
if indication_selections is None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Indication
if indication_selections is not None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Phase
if indication_selections is None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication and Phase
if indication_selections is not None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication Only
if indication_selections is not None and phase_selections is None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Phase only
if indication_selections is None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
indication_counts = dff.groupby('Condition')['NCTId'].nunique().reset_index()
indication_counts = indication_counts.rename(columns={'Condition': 'Disease', 'NCTId': 'Study Counts'})
indication_counts = indication_counts.sort_values(by='Study Counts', ascending=True)
fig = px.bar(indication_counts,
x='Study Counts',
y='Disease',
color=indication_counts.index,
orientation='h')
fig.update_layout(showlegend=False)
fig.update_yaxes(visible=True, showticklabels=True)
fig.update_layout(coloraxis_showscale=False)
return fig
# In[59]:
@app.callback(
Output('study-size', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value'),
Input('indication_selections', 'value'),
Input('phase_selections', 'value'),
Input('sponsor_type', 'value')])
def create_study_size_bar(data, value, indication_selections, phase_selections, sponsor_type):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff = clinicalgov_dff.sort_values(by='PrimaryCompletionDate', ascending=True).reset_index(drop=True)
dff = clinicalgov_dff[(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1])]
# Right now there are 3 options, so 2 (is none or is not none) x 2 x 2 means there should be 8 options
# None of the 3 are selected
if indication_selections is None and phase_selections is None and sponsor_type is None:
dff = dff
# All 3 are selected
if indication_selections is not None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor Only
if indication_selections is None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Indication
if indication_selections is not None and phase_selections is None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Sponsor and Phase
if indication_selections is None and phase_selections is not None and sponsor_type is not None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
clinicalgov_dff['LeadSponsorClass'].str.contains('|'.join(sponsor_type))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication and Phase
if indication_selections is not None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Indication Only
if indication_selections is not None and phase_selections is None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Condition'].str.contains('|'.join(indication_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
# Phase only
if indication_selections is None and phase_selections is not None and sponsor_type is None:
dff = clinicalgov_dff.loc[(clinicalgov_dff['Phase'].str.contains('|'.join(phase_selections))) & (
(clinicalgov_dff['PrimaryCompletionDate'].index >= value[0]) & (
clinicalgov_dff['PrimaryCompletionDate'].index <= value[1]))]
study_size = dff
study_size['EnrollmentCount'] = study_size['EnrollmentCount'].astype(float)
study_size = round(study_size.groupby('Phase')['EnrollmentCount'].mean(), 1).reset_index()
study_size = study_size.rename(columns={'Phase': 'Phase', 'EnrollmentCount': '# Participants'})
study_size = study_size.sort_values(by='Phase', ascending=False)
fig = px.bar(study_size,
x='# Participants',
y='Phase',
color=study_size['Phase'],
color_discrete_map={'Phase 1': 'lightcyan',
'Phase 2': 'royalblue', 'Phase 3': 'darkblue'},
orientation='h')
fig.update_layout(showlegend=False)
fig.update_yaxes(visible=True, showticklabels=True)
fig.update_layout(coloraxis_showscale=False)
fig.update_layout(font_color='yellow')
fig.update_layout({
'plot_bgcolor': 'rgba(0,0,0,0)',
'paper_bgcolor': 'rgba(0,0,0,0)'})
fig.update_xaxes(showgrid=False)
return fig
# In[60]:
@app.callback(
Output('study-duration', 'figure'),
[Input('trials', 'data'),
Input('time-slider', 'value'),
Input('indication_selections', 'value'),
Input('phase_selections', 'value'),
Input('sponsor_type', 'value')])
def create_study_duration_bar(data, value, indication_selections, phase_selections, sponsor_type):
clinicalgov_dff = pd.DataFrame(data)
# Converts dates to time stamp
clinicalgov_dff['PrimaryCompletionDate'] = pd.to_datetime(clinicalgov_dff['PrimaryCompletionDate'])
clinicalgov_dff['StartDate'] = pd.to_datetime(clinicalgov_dff['StartDate'])
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
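# Editorial sketch (not part of the original test suite): the kind of dtype coercion
# these classes pin down, shown on a bare Series. The complex case matches what
# test_setitem_series_int64 below asserts -- assigning a complex scalar into an int64
# Series upcasts the whole Series to complex128.
def _coercion_upcast_example():
    s = pd.Series([1, 2, 3, 4])   # starts as int64
    s[1] = 1 + 1j                 # incompatible scalar triggers an upcast
    return s.dtype                # np.complex128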
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
import pandas as pd
import textacy
import textblob
import en_core_web_sm
from spacy.symbols import CONJ, DET  # needed by entity_statements() below
nlp = en_core_web_sm.load()
# Multiprocessing Imports
from dask import dataframe as dd
from dask.multiprocessing import get
from multiprocessing import cpu_count
# Sentiment Imports
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
# Local Imports
from src.utils.pandas_utils import pivot_df_to_row
##
def text_vectorize_and_cluster(text, df=None, vectorizer=None, clusterer=None,
vector_params=None, clusterer_params=None,
outlier_scores=False, one_hot_labels=False, return_df=False,
return_type='clusters'):
""" Given processed text, vectorize and cluster it. Return cluster labels or cluster labels
along with fitted vectorizer and clusterer.
Parameters
----------
text : object
Object which contains text that will be passed to the transformer's .fit_transform() method
As such, text must already be processed and in correct format.
df : Pandas DataFrame
Optional dataframe attach clustering results to
vectorizer: object
Class for text vectorization. Must follow sklearn transformer convention and
implement .fit_transform() method
E.g. CountVectorizer from sklearn
vector_params: dict[str:obj]
Dictionary to pass to vectorizer as parameters
clusterer: object
Class for clustering. Must follow sklearn estimator convention and
implement .fit_predict() method for implementing cluster assignment
clusterer_params: dict[str:obj]
Dictionary to pass to clusterer as parameters
outlier_scores: boolean
Flag to indicate whether to include outlier scores computed by the clusterer,
accessed from the clusterer.outlier_scores_ attribute
one_hot_labels: boolean
Flag to indicate if cluster labels should be one hot encoded
instead of returned as a one dimensional array of ordinal
integer labels
return_df: boolean
Flag to indicate if results should be returned concatenated
with the dataframe passed to the 'df' keyword arg
return_type: str in ['clusters', 'all']
String indicating return type. Must be one of ['clusters', 'all']
clusters: Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
all: Return the fitted vectorizer, clusterer and cluster label results
Returns
-------
clusters: pd.Series or pd.DataFrame
Return the cluster results as a one dimensional array of ordinal
integer labels or concatenated to dataframe if return_df=True
clusters, vectorizer, clusterer: object, object, pd.Series or pd.DataFrame
Return the fitted vectorizer, clusterer and cluster label results
"""
# Check vectorizer and clusterer for correct methods
assert "fit_transform" in dir(vectorizer), "vectorizer has no 'fit_transform' method"
assert "fit_predict" in dir(clusterer), "clusterer has no 'fit_predict' method"
if return_df:
assert isinstance(df, pd.DataFrame), "If specifying 'return_df', data must be passed to argument 'df'"
# Instantiate vectorizer with params if specified
if vector_params:
vectorizer = vectorizer(**vector_params)
# Else instantiate the vectorizer
elif vectorizer:
vectorizer = vectorizer()
# Fit and transform text to vectors
vectors = vectorizer.fit_transform(text)
# Instantiate clusterer with params if specified
if clusterer_params:
clusterer = clusterer(**clusterer_params)
elif clusterer:
clusterer = clusterer()
# Fit and transform vectors to clusters
cluster_labels = clusterer.fit_predict(vectors)
if len(set(clusterer.labels_)) <= 1:
return print('Clusterer could not find any meaningful labels. All data would fall under one cluster')
# Create DataFrame of Cluster Labels
results = pd.DataFrame(cluster_labels, columns=['Cluster_Label'])
# Add Outlier Score if specified
if outlier_scores:
results['Outlier_Score'] = clusterer.outlier_scores_
# Add labels as dummy variables
if one_hot_labels:
one_hot_cols = pd.get_dummies(results['Cluster_Label'], prefix='Cluster_Label')
one_hot_col_names = one_hot_cols.columns.values.tolist()
results = pd.merge(results, one_hot_cols, left_index=True, right_index=True)
# Attach to data if specified
if return_df:
results = pd.merge(df, results, left_index=True, right_index=True)
# Return all or just cluster results
if return_type == 'all':
return results, vectorizer, clusterer
elif return_type == 'clusters':
return results
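# Editorial usage sketch (not part of the original module): one reasonable pairing of
# vectorizer and clusterer for text_vectorize_and_cluster. The 'clean_text' column
# name and every parameter value below are illustrative assumptions, not requirements.
def _example_text_clustering(df):
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.cluster import KMeans
    return text_vectorize_and_cluster(
        df['clean_text'], df=df,
        vectorizer=TfidfVectorizer,
        clusterer=KMeans,
        vector_params={'min_df': 5, 'stop_words': 'english'},
        clusterer_params={'n_clusters': 8, 'random_state': 0},
        return_df=True,
        return_type='clusters')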
def dask_df_textacy_apply(df, text_col, textacy_col_name='textacy_doc', ncores=None, inplace=False):
"""
Use dask to parallelize apply textacy Doc object creation from a dataframe
Parameters
----------
df : DataFrame
Dataframe which holds the text
text_col : str
The name of the text column in the df
textacy_col_name : str
The name to give to the column with the textacy doc objects
ncores : int
Number of cores to use for multiprocessing. Defaults to all cores in cpu minus one.
inplace : bool
Whether to return the entire df with the textacy doc series concatenated
or only textacy doc series.
Default is False
Returns
-------
DataFrame / Series
Either the dataframe passed as arg with the textacy series as last column or
just the textacy column
"""
# If no number of cores to work with, default to max
if not ncores:
    ncores = cpu_count() - 1
# Partition dask dataframe and map textacy doc apply
# Sometimes this fails because it can't infer the dtypes correctly
# meta=pd.Series(name=0, dtype='object') is a start
# This is also a start https://stackoverflow.com/questions/40019905/how-to-map-a-column-with-dask?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
# Possibly both the inner lambda apply and outer lambda df both need metadata?
textacy_series = dd.from_pandas(df, npartitions=ncores).map_partitions(
lambda df : df[text_col].apply(lambda x : textacy.doc.Doc(x, lang=nlp))).compute(get=get)
# Name the series
textacy_series.name = textacy_col_name
# If inplace return the dataframe and textacy Series
if inplace:
return pd.concat([df, textacy_series], axis=1)
# Else return just the Textacy series
else:
return textacy_series
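# Editorial usage sketch: parallel textacy Doc creation over a hypothetical
# 'review_text' column; the column name and core count are made up for illustration.
def _example_dask_textacy(reviews_df):
    return dask_df_textacy_apply(reviews_df, text_col='review_text',
                                 textacy_col_name='textacy_doc',
                                 ncores=4, inplace=True)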
def load_textacy_corpus(df, text_col, metadata=True, metadata_columns=None):
# Fill text columns nulls with empty strings
df[text_col] = df[text_col].fillna('')
if metadata:
# Default to metadata columns being every column except the text column
metadata_cols = list(df.columns)
# If list is provided use those
if metadata_columns:
    metadata_cols = list(metadata_columns)
# Add text column to metadata columns
# These will constitute all the information held in the textacy corpus
if text_col not in metadata_cols:
    metadata_cols.append(text_col)
# Subset to these
df = df[metadata_cols]
# Convert to nested dict of records
records = df.to_dict(orient='records')
# Split into text and metadata stream
text_stream, metadata_stream = textacy.io.split_records(records, text_col)
# Create Corpus
return textacy.corpus.Corpus(lang='en', texts=text_stream, metadatas=metadata_stream)
# With no metadata
else:
text_stream = (text for text in df[text_col].values)
return textacy.corpus.Corpus(lang='en', texts=text_stream)
# Entity Extraction
def corpus_entity_counts(corpus, include=None, exclude=None):
"""
Given a textacy corpus, return a dataframe of entities and their respective counts.
Parameters
----------
corpus : textacy Corpus object
The corpus from which to extract and count named entities
include : str or Set[str]
Remove named entities whose type IS NOT in this param;
if “NUMERIC”, all numeric entity types (“DATE”, “MONEY”, “ORDINAL”, etc.) are included
exclude : str or Set[str]
remove named entities whose type IS in this param; if “NUMERIC”,
all numeric entity types (“DATE”, “MONEY”, “ORDINAL”, etc.) are excluded
Returns
-------
Dataframe
A pandas dataframe with entities and their respective counts, sorted by highest count
"""
from collections import Counter
# Extract all entities
entities = [list(textacy.extract.named_entities(doc, include_types=include, exclude_types=exclude))
for doc in
corpus]
# Pull all non-null entities to flattened list
non_null_entities = []
for entity in entities:
if entity:
non_null_entities.extend(entity)
# Change dtype to string so counter can distinguish
non_null_entities = [str(x) for x in non_null_entities]
# Count entities
entity_counts = Counter(non_null_entities)
# Entity Dataframe
df = (pd.DataFrame.from_dict(entity_counts, orient='index')
.reset_index()
.rename(columns={'index':'Entity', 0:'Count'})
.sort_values(by='Count', ascending=False)
.reset_index(drop=True))
return df
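# Editorial usage sketch: the twenty most frequent organisation/person entities in a
# corpus built by load_textacy_corpus; the entity types chosen here are illustrative.
def _example_entity_counts(corpus):
    return corpus_entity_counts(corpus, include={'ORG', 'PERSON'}).head(20)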
def entity_statements(doc, entity, ignore_entity_case=True,
min_n_words=1, max_n_words=300, return_entity=False):
"""
Extract sentences with a specified entity present in it
Modified from source code of Textacy's textacy.extract.semistructured_statements()
Args:
doc (``textacy.Doc`` or ``spacy.Doc``)
entity (str): a noun or noun phrase of some sort (e.g. "President Obama",
"global warming", "Python")
ignore_entity_case (bool): if True, entity matching is case-independent
min_n_words (int): min number of tokens allowed in a matching fragment
max_n_words (int): max number of tokens allowed in a matching fragment
Yields:
(``spacy.Span`` or ``spacy.Token``) or (``spacy.Span`` or ``spacy.Token``, ``spacy.Span`` or ``spacy.Token``):
depending on whether return_entity is enabled or not
Notes:
Inspired by <NAME>, <NAME>, <NAME>. Visual Analytics of
Media Frames in Online News and Blogs. IEEE InfoVis Workshop on Text
Visualization. October, 2013.
Which itself was inspired by by <NAME>.; <NAME>.; <NAME>.; and
<NAME>. 2010. Portable Extraction of Partially Structured Facts from
the Web. In Proc. ICETAL 2010, LNAI 6233, 345-356. Heidelberg, Springer.
"""
if ignore_entity_case is True:
entity_toks = entity.lower().split(' ')
get_tok_text = lambda x: x.lower_
else:
entity_toks = entity.split(' ')
get_tok_text = lambda x: x.text
first_entity_tok = entity_toks[0]
n_entity_toks = len(entity_toks)
#cue = cue.lower()
#cue_toks = cue.split(' ')
#n_cue_toks = len(cue_toks)
def is_good_last_tok(tok):
if tok.is_punct:
return False
if tok.pos in {CONJ, DET}:
return False
return True
for sent in doc.sents:
for tok in sent:
# filter by entity
if get_tok_text(tok) != first_entity_tok:
continue
if n_entity_toks == 1:
the_entity = tok
the_entity_root = the_entity
elif all(get_tok_text(tok.nbor(i=i + 1)) == et for i, et in enumerate(entity_toks[1:])):
the_entity = doc[tok.i: tok.i + n_entity_toks]
the_entity_root = the_entity.root
else:
continue
if return_entity:
yield (the_entity, sent.orth_)
else:
yield (sent.orth_)
break
def list_of_entity_statements(corpus, entity):
"""
Given an entity and a textacy corpus, return a list of all the sentences in which this entity occurs
Parameters
----------
corpus : textacy Corpus object
entity : str
The entity for which to search all the sentences within the corpus
Returns
-------
entity_sentences
A list of strings, each being a sentence which contains the entity search
"""
entity_sentences = [list(entity_statements(doc, entity=entity))
for doc
in corpus
if list(entity_statements(doc, entity=entity))] # If statement that removes null sentences
entity_sentences = [item for sublist in entity_sentences for item in sublist]
return entity_sentences
# Entity Sentiment extractions
def vader_entity_sentiment(df,
textacy_col,
entity,
inplace=True,
vader_sent_types=['neg', 'neu', 'pos', 'compound'],
keep_stats=['count', 'mean', 'min', '25%', '50%', '75%', 'max']):
"""
Pull the descriptive sentiment stats of text sentence with a specified entity in it.
Parameters
----------
df : DataFrame
Dataframe which holds the text
textacy_col : str
The name to give to the column with the textacy doc objects
entity : str
The entity to search the textacy Doc object for
inplace : bool
Whether to return the entire df with the sentiment info or the sentiment info alone
Default is True
vader_sent_types : list
The type of sentiment to extract. neg: negative, pos: positive, neu: neutral,
compound: a combination of all three
keep_stats : list
A list of the summary statistics to keep. Default is all returned by pandas DataFrame.describe() method
Returns
-------
DataFrame
Either the dataframe passed as arg with the sentiment info as trailing columns
or the sentiment descriptive stats by itself
"""
vader_analyzer = SentimentIntensityAnalyzer()
sentiment_rows = []
for text in df[textacy_col].values:
text_entities = list(entity_statements(text, entity))
# Iterate through all sentences and get sentiment analysis
entity_sentiment_info = [vader_analyzer.polarity_scores(sentence)
for
sentence
in
text_entities]
# After taking sentiments, turn into a dataframe and describe
try:
# Indices and columns to keep
keep_stats = keep_stats
keep_cols = vader_sent_types
# Describe those columns
summary_stats = pd.DataFrame(entity_sentiment_info)
import numpy as np
import pandas as pd
from torch.utils.data import Dataset
from torch import tensor, float32
import json
from collections import defaultdict
# PyTorch representation of the cleaned dataset
class DatasetModel(Dataset):
def __init__(self, df, vectorizer):
self.df = df
self._vectorizer = vectorizer
self._max_seq_length = max(map(len, self.df.predictor)) + 2
self.train_df = self.df[self.df.split == 'train']
self.train_size = len(self.train_df)
self.valid_df = self.df[self.df.split == 'valid']
self.valid_size = len(self.valid_df)
self.test_df = self.df[self.df.split == 'test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'valid': (self.valid_df, self.valid_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# class weights
class_counts = self.train_df.target.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.target_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequences = [count for _, count in sorted_counts]
self.class_weights = 1.0 / tensor(frequences, dtype=float32)
# loads the data and creates a vectorizer
@classmethod
def make_vectorizer(cls, path: str):
df = pd.read_csv(path)
train_df = df[df.split == 'train']
return cls(df, PredictorVectorizer.from_dataframe(train_df))
def get_vectorizer(self):
    return self._vectorizer
def save_vectorizer(self, vectorizer_filepath):
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def set_split(self, split='train'):
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
# entry point for data items in PyTorch
def __getitem__(self, index):
    "index - the index of the data point"
row = self._target_df.iloc[index]
predictor_vector, vec_length = self._vectorizer.vectorize(row.predictor, self._max_seq_length)
target_index = self._vectorizer.target_vocab.lookup_token(row.target)
return {'x_data': predictor_vector,
'y_target': target_index,
'x_length': vec_length}
def get_num_batches(self, batch_size):
return len(self) // batch_size
# vectorizer that keeps the token and target vocabularies aligned and applies them
class PredictorVectorizer:
def __init__(self, char_vocab, target_vocab):
"""
Arguments:
char_vocab (Vocabulary) - maps sequence tokens to vocabulary indices
target_vocab - maps the target (category) to vocabulary indices
"""
self.char_vocab = char_vocab
self.target_vocab = target_vocab
def vectorize(self, predictor, vector_length=-1):
"""
Arguments:
predictor - the token sequence to vectorize
vector_length - the length of the index vector
"""
indices = [self.char_vocab.begin_seq_index]
indices.extend(self.char_vocab.lookup_token(token)
for token in predictor)
indices.append(self.char_vocab.end_seq_index)
if vector_length < 0:
vector_length = len(indices)
out_vector = np.zeros(vector_length, dtype=np.int64)
out_vector[:len(indices)] = indices
out_vector[len(indices):] = self.char_vocab.mask_index
return out_vector, len(indices)
@classmethod
def from_dataframe(cls, df: pd.DataFrame):
char_vocab = SequenceVocabulary()
target_vocab = Vocabulary()
for index, row in df.iterrows():
tokens = row.predictor.split(' ')
for token in tokens:
char_vocab.add_token(token)
target_vocab.add_token(row.target)
return cls(char_vocab, target_vocab)
@classmethod
def from_serializable(cls, contents):
char_vocab = SequenceVocabulary.from_serializable(contents['char_vocab'])
target_vocab = Vocabulary.from_serializable(contents['target_vocab'])
return cls(char_vocab=char_vocab, target_vocab=target_vocab)
def to_serializable(self):
return {'char_vocab': self.char_vocab.to_serializable(),
'target_vocab': self.target_vocab.to_serializable()}
# mapping of tokens to numeric form - the technical vocabularies
class Vocabulary:
"""
Arguments:
token_to_idx: dict - mapping of tokens to indices
add_unk: bool - whether to add the UNK token
unk_token - the UNK token to add to the vocabulary
"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token='<UNK>'):
if token_to_idx is None:
token_to_idx = dict()
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
# serializable dictionary representation
def to_serializable(self):
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
# build a class instance from a serialized dictionary
@classmethod
def from_serializable(cls, contents):
return cls(**contents)
# updates the mapping dicts - if the token is not found, it is added to the vocabulary
def add_token(self, token):
if token in self._token_to_idx:
index = self._token_to_idx[token]
else:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
# returns the index for the given token, or the UNK index if the token is not found
def lookup_token(self, token):
if self._add_unk:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
# возвращает соотвествующий индексу токен
def lookup_index(self, index):
if index not in self._idx_to_token:
raise KeyError('Index (%d) is not in the vocabulary' % index)
return self._idx_to_token[index]
def __str__(self):
return '<Vocabulary (size=%d)>' % len(self)
def __len__(self):
return len(self._token_to_idx)
# vocabulary for tokenizing sequences
class SequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, unk_token='<UNK>',
mask_token="<MASK>", begin_seq_token='<BEGIN>',
end_seq_token='<END>'):
super(SequenceVocabulary, self).__init__(token_to_idx)
self._mask_token = mask_token  # for handling variable-length sequences
self._unk_token = unk_token  # for marking tokens missing from the vocabulary
self._begin_seq_token = begin_seq_token  # start-of-sequence marker
self._end_seq_token = end_seq_token  # end-of-sequence marker
self.mask_index = self.add_token(self._mask_token)
self.unk_index = self.add_token(self._unk_token)
self.begin_seq_index = self.add_token(self._begin_seq_token)
self.end_seq_index = self.add_token(self._end_seq_token)
def to_serializable(self):
contents = super(SequenceVocabulary, self).to_serializable()
contents.update({'unk_token': self._unk_token,
'mask_token': self._mask_token,
'begin_seq_token': self._begin_seq_token,
'end_seq_token': self._end_seq_token})
return contents
def lookup_token(self, token):
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
class TrainValidSplit:
def __init__(self,
train_proportion: float,
valid_proportion: float,
test_proportion: float,
raw_df_path: str,
seed: int):
self.by_target = self.get_target_dict(raw_df_path)
self.final_list = self.make_split(self.by_target, train_proportion, valid_proportion,
test_proportion, seed)
@staticmethod
def get_target_dict(raw_df_path):
df = pd.read_csv(raw_df_path)
by_target = defaultdict(list)
for _, row in df.iterrows():
by_target[row.target].append(row.to_dict())
return by_target
@staticmethod
def make_split(by_target, train_proportion, valid_proportion, test_proportion, seed):
final_list = []
np.random.seed(seed)
for _, item_list in sorted(by_target.items()):
np.random.shuffle(item_list)
n = len(item_list)
n_train = int(train_proportion * n)
n_valid = int(valid_proportion * n)
n_test = int(test_proportion * n)
for item in item_list[:n_train]:
item['split'] = 'train'
for item in item_list[n_train:n_train + n_valid]:
item['split'] = 'valid'
for item in item_list[n_train + n_valid:]:
item['split'] = 'test'
final_list.extend(item_list)
return final_list
def save_prepared_data(self, prepared_df_path):
prepared_data = pd.DataFrame(self.final_list)
prepared_data.to_csv(prepared_df_path, index=False)
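# Editorial usage sketch (file path and batch size are assumptions): build the dataset
# and its vectorizer from a prepared CSV (columns: predictor, target, split) and
# iterate training batches with a standard DataLoader.
def example_training_batches(prepared_csv_path, batch_size=64):
    from torch.utils.data import DataLoader
    dataset = DatasetModel.make_vectorizer(prepared_csv_path)
    dataset.set_split('train')
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    for batch in loader:
        x, y = batch['x_data'], batch['y_target']
        # a model forward pass and loss computation would go here
        return x.shape, y.shape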
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 09:35:31 2018
@author: <EMAIL>
Last modified: 2019-11-04
------------------------------------------------------
** Semantic Search Analysis: Build MeSH term list **
------------------------------------------------------
This script: Creates the file UmlsMesh.csv from UMLS's MRCONSO.RRF
and MRSTY.RRF. When we limit to MeSH, the file can be shared without each user
signing the UMLS license agreement, because the MeSH vocabulary is free from
the U.S. National Library of Medicine.
This script limits the list to the preferred "atom" for each concept (CUI),
sets a limit on term length, and stacks all the semantic types when there is
more than one.
You should occasionally check the release notes to see if they changed any
columns or the column order.
PreferredTerm is the MeSH term, and AdjustedQueryTerm is PreferredTerm, but
ONLY a-zA-Z0-9 characters. AdjustedQueryTerm should be used to match the cleaned
logs.
INPUTS (explained below):
- data/external/MRCONSO.RRF
- data/external/MRSTY.RRF
OUTPUTS:
- data/matchFiles/UmlsMesh.csv
----------------
SCRIPT CONTENTS
----------------
1. Start-up / What to put into place, where
2. From MRCONSO.RRF, get string (term name, STR) and the unique identifier
for concept (CUI)
3. Create df from local SemanticNetworkReference and MRSTY.RRF
4. Join the two dfs into umlsTermList files: 'STR', 'CUI', 'SemanticType'
5. Write to file, show stats
"""
#%%
# ============================================
# 1. Start-up / What to put into place, where
# ============================================
'''
HOW TO PREPARE THE SOURCE FILES
1. Create or free up data/external/umls_distribution (save old version until new one works)
2. Check that you have 40 GB of free disk space.
3. Log in to https://www.nlm.nih.gov/research/umls/licensedcontent/umlsknowledgesources.html
4. Download the current MRCONSO.RRF, listed down the page. Move it to
data/external/umls_distribution
5. Download the full release
6. Unzip the main file
7. Add the zip extension to 20xxaa-1-meta.nlm and unzip
8. Go into 20xxAA folder > META. Uncompress MRSTY.RRF.gz
9. Move MRSTY.RRF to data/external/umls_distribution
10. Remove the distribution files if you want to recover space, now or after
successful script run.
'''
import pandas as pd
import os
# Set working directory and directories for read/write
envHome = (os.environ['HOME'])
os.chdir(envHome + '/Projects/classifysearches')
dataRaw = 'data/external/' # Put log here before running script
dataMatchFiles = 'data/matchFiles/' # Permanent helper files; both reading and writing required
reports = 'reports/'
#%%
# ===========================================================================
# 2. From MRCONSO.RRF, get string (term name, STR) and the unique identifier
# for concept (CUI)
# ===========================================================================
'''
CONcept Names and SOurces info: https://www.ncbi.nlm.nih.gov/books/NBK9685/#ch03.sec3.3.4
SAB is the abbreviated source name, such as MSH for MeSH...
'''
# Huge import, ~14 million rows. File has no col headers.
VariationsWithinConcept = pd.read_csv(dataRaw + "MRCONSO.RRF", sep='|',
low_memory=False, index_col=False,
names = ["CUI","LAT","TS","LUI","STT","SUI","ISPREF",
"AUI","SAUI","SCUI","SDUI","SAB","TTY","CODE",
"STR","SRL","SUPPRESS","CVF"])
#%%
# --------------------------------------------------------------------------
# Limit to MeSH so file can be posted to repo, and matching is easier
meshOnly = VariationsWithinConcept.loc[(VariationsWithinConcept['SAB'] == "MSH")]
# Reduce cols
atomForCui = meshOnly[['STR','CUI','LAT', 'SAB']] # ,"TTY","TS","ISPREF"
# Lower-case STR to match lowercased logs
atomForCui['STR'] = atomForCui['STR'].str.lower()
del [[VariationsWithinConcept]]
#%%
# ===============================================================
# 3. Create df from local SemanticNetworkReference and MRSTY.RRF
# ===============================================================
'''
One CUI can have several Semantic Types. This order of operations allows
stacking the Semantic Types before matching them with the term file.
'''
# Open custom-created 'Semantic Network' key
'''
Semantic types and groups - see https://semanticnetwork.nlm.nih.gov/ file
SemGroups.txt, has additional info such as a col to build CSS indents in
reporting, etc.
'''
SemanticNetwork = pd.read_excel(dataMatchFiles + 'SemanticNetworkReference.xlsx')
SemanticNetwork.columns
'''
'SemanticTypeAbr', 'SemanticType', 'SemanticGroup', 'SemanticGroupAbr',
'CustomTreeNumber', 'BranchPosition', 'UniqueID', 'SemanticGroupCode',
'Definition', 'Examples', 'RelationName', 'SemTypeTreeNo', 'UsageNote',
'NonHumanFlag', 'RecordType', 'TUI'
'''
# Reduce cols. Don't need definitions, etc. here
SemanticNetwork = SemanticNetwork[["TUI", "SemanticTypeAbr", "SemanticGroupCode",
"SemanticGroup", "SemanticGroupAbr",
"CustomTreeNumber", "SemanticType",
"BranchPosition", "UniqueID"]]
'''
The below, showing how concept IDs (CUIs) relate to semantic type
IDs, allows you to join human-readable labels to each other.
MRSTY.RRF; info at https://www.ncbi.nlm.nih.gov/books/NBK9685/
Col. Description
CUI Unique identifier of concept
TUI Unique identifier of Semantic Type
STN Semantic Type tree number
STY Semantic Type. The valid values are defined in the Semantic Network.
ATUI Unique identifier for attribute
CVF Content View Flag. Bit field used to flag rows included in Content View.
This field is a varchar field to maximize the number of bits available for use.
Sample Record in documentation, HOWEVER 2018AA only has content in the first 2.
C0001175|T047|B2.2.1.2.1|Disease or Syndrome|AT17683839|3840|
'''
TuisByCui = pd.read_csv(dataRaw + "MRSTY.RRF", sep='|', index_col=False,
names = ["CUI","TUI","STN","STY","ATUI","CVF"])
# ~4.1 million rows
ViewSomeTuis = TuisByCui[10000:10500]
# Reduce columns
TuisByCui = TuisByCui[["CUI","TUI"]]
SemanticJoinFile = pd.merge(TuisByCui, SemanticNetwork, left_on='TUI', right_on='TUI', how='left')
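# Editorial sketch of the remaining steps listed in the header (4: join the two dfs,
# 5: write to file, show stats). The original script's exact handling may differ;
# this shows one way to stack multiple semantic types per CUI and join them to the
# MeSH atoms before writing UmlsMesh.csv.
StackedTypes = (SemanticJoinFile.groupby('CUI')['SemanticType']
                .apply(lambda x: '|'.join(x.astype(str)))
                .reset_index())
UmlsMesh = pd.merge(atomForCui, StackedTypes, how='left', on='CUI')
UmlsMesh.to_csv(dataMatchFiles + 'UmlsMesh.csv', encoding='utf-8', index=False)
print(UmlsMesh['SemanticType'].value_counts().head(10))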
from unittest import TestCase
from unittest.mock import (
ANY,
Mock,
patch,
)
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from pypika import Order
from fireant.queries.pagination import paginate
from fireant.tests.dataset.mocks import (
dimx2_date_bool_df,
dimx2_date_str_df,
dimx2_str_num_df,
dimx3_date_str_str_df,
)
TS = "$timestamp"
mock_table_widget = Mock()
mock_table_widget.group_pagination = False
mock_chart_widget = Mock()
mock_chart_widget.group_pagination = True
mock_dimension_definition = Mock()
mock_dimension_definition.alias = "$political_party"
mock_metric_definition = Mock()
mock_metric_definition.alias = "$votes"
class SimplePaginationTests(TestCase):
@patch("fireant.queries.pagination._simple_paginate")
def test_that_with_no_widgets_using_group_pagination_that_simple_pagination_is_applied(self, mock_paginate):
paginate(dimx2_date_str_df, [mock_table_widget])
mock_paginate.assert_called_once_with(ANY, ANY, ANY, ANY)
@patch("fireant.queries.pagination._simple_paginate")
def test_that_with_group_pagination_and_one_dimension_that_simple_pagination_is_applied(self, mock_paginate):
paginate(dimx2_str_num_df, [mock_table_widget])
mock_paginate.assert_called_once_with(ANY, ANY, ANY, ANY)
def test_paginate_with_limit_slice_data_frame_to_limit(self):
paginated = paginate(dimx2_date_str_df, [mock_table_widget], limit=5)
expected = dimx2_date_str_df[:5]
assert_frame_equal(expected, paginated)
def test_paginate_with_offset_slice_data_frame_from_offset(self):
paginated = paginate(dimx2_date_str_df, [mock_table_widget], offset=5)
expected = dimx2_date_str_df[5:]
assert_frame_equal(expected, paginated)
def test_paginate_with_limit_and_offset_slice_data_frame_from_offset_to_offset_plus_limit(
self,
):
paginated = paginate(dimx2_date_str_df, [mock_table_widget], limit=5, offset=5)
expected = dimx2_date_str_df[5:10]
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_dimension_definition, Order.asc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_dimension_definition.alias], ascending=True)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_dimension_definition, Order.desc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_dimension_definition.alias], ascending=False)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_metric_definition, Order.asc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_metric_definition.alias], ascending=True)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_metric_definition, Order.desc)],
)
expected = dimx2_date_str_df.sort_values(by=[mock_metric_definition.alias], ascending=False)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_multiple_orders(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[
(mock_dimension_definition, Order.asc),
(mock_metric_definition, Order.desc),
],
)
expected = dimx2_date_str_df.sort_values(
by=[mock_dimension_definition.alias, mock_metric_definition.alias],
ascending=[True, False],
)
assert_frame_equal(expected, paginated)
def test_apply_sort_before_slice(self):
paginated = paginate(
dimx2_date_str_df,
[mock_table_widget],
orders=[(mock_metric_definition, Order.asc)],
limit=5,
offset=5,
)
expected = dimx2_date_str_df.sort_values(by=[mock_metric_definition.alias], ascending=True)[5:10]
assert_frame_equal(expected, paginated)
class GroupPaginationTests(TestCase):
@patch("fireant.queries.pagination._group_paginate")
def test_with_one_widget_using_group_pagination_that_group_pagination_is_applied(self, mock_paginate):
paginate(dimx2_date_str_df, [mock_chart_widget, mock_table_widget])
mock_paginate.assert_called_once_with(ANY, ANY, ANY, ANY)
def test_paginate_with_limit_slice_data_frame_to_limit_in_each_group(self):
paginated = paginate(dimx2_date_str_df, [mock_chart_widget], limit=2)
index = dimx2_date_str_df.index
reindex = pd.MultiIndex.from_product([index.levels[0], index.levels[1][:2]], names=index.names)
expected = dimx2_date_str_df.reindex(reindex).dropna().astype(np.int64)
assert_frame_equal(expected, paginated)
def test_paginate_with_offset_slice_data_frame_from_offset_in_each_group(self):
paginated = paginate(dimx2_date_str_df, [mock_chart_widget], offset=2)
index = dimx2_date_str_df.index
reindex = pd.MultiIndex.from_product([index.levels[0], index.levels[1][2:]], names=index.names)
expected = dimx2_date_str_df.reindex(reindex)
assert_frame_equal(expected, paginated)
def test_paginate_with_limit_and_offset_slice_data_frame_from_offset_to_offset_plus_limit_in_each_group(
self,
):
paginated = paginate(dimx2_date_str_df, [mock_chart_widget], limit=1, offset=1)
index = dimx2_date_str_df.index
reindex = pd.MultiIndex.from_product([index.levels[0], index.levels[1][1:2]], names=index.names)
expected = dimx2_date_str_df.reindex(reindex).dropna().astype(np.int64)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_dimension_definition, Order.asc)],
)
expected = dimx2_date_str_df.sort_values(by=[TS, mock_dimension_definition.alias], ascending=True)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_dimension_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_dimension_definition, Order.desc)],
)
expected = dimx2_date_str_df.sort_values(by=[TS, mock_dimension_definition.alias], ascending=(True, False))
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_asc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_metric_definition, Order.asc)],
)
expected = dimx2_date_str_df.iloc[[1, 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]
assert_frame_equal(expected, paginated)
def test_apply_sort_with_one_order_metric_desc(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[(mock_metric_definition, Order.desc)],
)
expected = dimx2_date_str_df.iloc[[2, 0, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11]]
assert_frame_equal(expected, paginated)
def test_apply_sort_multiple_levels_df(self):
paginated = paginate(
dimx3_date_str_str_df,
[mock_chart_widget],
orders=[(mock_metric_definition, Order.asc)],
)
sorted_groups = dimx3_date_str_str_df.groupby(level=[1, 2]).sum().sort_values(by="$votes", ascending=True).index
expected = (
dimx3_date_str_str_df.groupby(level=0)
.apply(lambda df: df.reset_index(level=0, drop=True).reindex(sorted_groups))
.dropna()
)
metrics = ["$votes", "$wins", "$wins_with_style", "$turnout"]
expected[metrics] = expected[metrics].astype(np.int64)
assert_frame_equal(expected, paginated)
def test_apply_sort_with_multiple_orders(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
orders=[
(mock_dimension_definition, Order.asc),
(mock_metric_definition, Order.desc),
],
)
expected = dimx2_date_str_df.sort_values(
by=[TS, mock_dimension_definition.alias, mock_metric_definition.alias],
ascending=[True, True, False],
)
assert_frame_equal(expected, paginated)
def test_apply_sort_before_slice(self):
paginated = paginate(
dimx2_date_str_df,
[mock_chart_widget],
limit=1,
offset=1,
orders=[(mock_metric_definition, Order.asc)],
)
expected = dimx2_date_str_df.iloc[[0, 3, 5, 7, 9, 11]]
assert_frame_equal(expected, paginated)
def test_group_paginate_with_bool_dims__no_pagination(self):
# This test does not apply any pagination but checks that none of the dimension values get lost
expected = dimx2_date_bool_df
paginated = paginate(dimx2_date_bool_df, [mock_chart_widget])
assert_frame_equal(expected, paginated)
def test_group_paginate_with_bool_dims__paginate_single_value(self):
        # Applies a limit of 1 per group and checks that the bool dimension values are preserved
paginated = paginate(dimx2_date_bool_df, [mock_chart_widget], limit=1)
expected = dimx2_date_bool_df.loc[(slice(None), False), :]
| assert_frame_equal(expected, paginated) | pandas.testing.assert_frame_equal |
def to_ascii(rows, n=None):
from terminaltables import AsciiTable
if n is None:
n = rows.max_display_rows
table_data = [rows.headers]
for each in rows.rows[:n]:
table_data.append(each)
if len(table_data) < len(rows):
table_data.append(['...']*len(table_data[0]))
asciitable = AsciiTable(table_data)
asciitable.outer_border = False
return asciitable.table
def _html_table_row(row, header=False):
tag = 'th' if header else 'td'
row_items = ' '.join('<{tag}>{}</{tag}>'.format(each, tag=tag) for each in row)
return '<tr>{}</tr>'.format(row_items)
def to_html(rows, n=None):
if n is None:
n = rows.max_display_rows
headers = rows.headers
print_rows = rows.rows[:n]
more = _html_table_row(['...']*len(headers)) if n < len(rows.rows) else ''
html_rows = '\n'.join(_html_table_row(row) for row in print_rows)
html_table = '\n'.join(['<table style="font-size:10pt; white-space:nowrap;">',
_html_table_row(headers, header=True),
html_rows,
more,
'</table>'])
return html_table
def to_pandas(rows):
import pandas as pd
return | pd.DataFrame(rows.rows, columns=rows.headers) | pandas.DataFrame |
"""
This script transforms the Semeval Task 5: Hyperpartisan News Detection data
provided in XML format, to CSV format for easier use.
"""
import pandas as pd
import xml.etree.cElementTree as et
import numpy as np
gfiles = ["./ground-truth-training-byarticle-20181122.xml",
"./ground-truth-training-bypublisher-20181122.xml",
"./ground-truth-validation-bypublisher-20181122.xml"]
gdfCols = ["hyperpartisan", "id", "labeled-by", "url", "bias"]
files = ["./articles-training-byarticle-20181122.xml",
"./articles-training-bypublisher-20181122.xml",
"./articles-validation-bypublisher-20181122.xml"]
dfCols = ["id", "published-at", "title", "text"]
# Handles articles
for _file in files:
df = pd.DataFrame(columns=dfCols)
index = 1
i = []
p = []
ti = []
te = []
for node in et.parse(_file).getroot():
i.append(node.attrib.get("id"))
p.append(node.attrib.get("published-at"))
ti.append(node.attrib.get("title"))
        if node.text is not None:
            node.text = None  # clear the node's own leading text so itertext() below only collects the paragraph children
article = ""
for paragraph in node.itertext():
article += paragraph
te.append(article)
index += 1
if index % 100 == 0:
print(index)
df["id"], df["published-at"], df["title"], df["text"] = pd.Series(i), pd.Series(p), pd.Series(ti), pd.Series(te)
print(df.shape)
df.to_csv(_file[:-4] + ".csv")
# Handles ground truth
for gfile in gfiles:
df = pd.DataFrame(columns=gdfCols)
index = 1
h = []
i = []
l = []
u = []
b = []
for node in et.parse(gfile).getroot():
h.append(node.attrib.get("hyperpartisan"))
i.append(node.attrib.get("id"))
l.append(node.attrib.get("labeled-by"))
u.append(node.attrib.get("url"))
b.append(node.attrib.get("bias") if node.attrib.get("bias") is not None else "")
index += 1
if index % 100 == 0:
print(index)
df["hyperpartisan"], df["id"], df["labeled-by"], df["url"], df["bias"] = pd.Series(h), pd.Series(i), pd.Series(l), pd.Series(u), pd.Series(b)
print(df.shape)
df.to_csv(gfile[:-4] + ".csv")
# Merges
for i in range(len(files)):
print(i)
df_file, df_gfile = | pd.read_csv(files[i][:-4] + ".csv", sep=',') | pandas.read_csv |
import ffn
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal as aae
try:
df = pd.read_csv('tests/data/test_data.csv', index_col=0, parse_dates=True)
except FileNotFoundError as e:
try:
df = pd.read_csv('data/test_data.csv', index_col=0, parse_dates=True)
except FileNotFoundError as e2:
        raise e2
ts = df['AAPL'][0:10]
def test_to_returns_ts():
data = ts
actual = data.to_returns()
assert len(actual) == len(data)
assert np.isnan(actual[0])
aae(actual[1], -0.019, 3)
aae(actual[9], -0.022, 3)
def test_to_returns_df():
data = df
actual = data.to_returns()
assert len(actual) == len(data)
assert all(np.isnan(actual.iloc[0]))
aae(actual['AAPL'][1], -0.019, 3)
aae(actual['AAPL'][9], -0.022, 3)
aae(actual['MSFT'][1], -0.011, 3)
aae(actual['MSFT'][9], -0.014, 3)
aae(actual['C'][1], -0.012, 3)
aae(actual['C'][9], 0.004, 3)
def test_to_log_returns_ts():
data = ts
actual = data.to_log_returns()
assert len(actual) == len(data)
assert np.isnan(actual[0])
aae(actual[1], -0.019, 3)
aae(actual[9], -0.022, 3)
def test_to_log_returns_df():
data = df
actual = data.to_log_returns()
assert len(actual) == len(data)
assert all(np.isnan(actual.iloc[0]))
aae(actual['AAPL'][1], -0.019, 3)
aae(actual['AAPL'][9], -0.022, 3)
aae(actual['MSFT'][1], -0.011, 3)
aae(actual['MSFT'][9], -0.014, 3)
aae(actual['C'][1], -0.012, 3)
aae(actual['C'][9], 0.004, 3)
def test_to_price_index():
data = df
rets = data.to_returns()
actual = rets.to_price_index()
assert len(actual) == len(data)
aae(actual['AAPL'][0], 100, 3)
aae(actual['MSFT'][0], 100, 3)
aae(actual['C'][0], 100, 3)
aae(actual['AAPL'][9], 91.366, 3)
aae(actual['MSFT'][9], 95.191, 3)
aae(actual['C'][9], 101.199, 3)
actual = rets.to_price_index(start=1)
assert len(actual) == len(data)
aae(actual['AAPL'][0], 1, 3)
aae(actual['MSFT'][0], 1, 3)
aae(actual['C'][0], 1, 3)
aae(actual['AAPL'][9], 0.914, 3)
aae(actual['MSFT'][9], 0.952, 3)
aae(actual['C'][9], 1.012, 3)
def test_rebase():
data = df
actual = data.rebase()
assert len(actual) == len(data)
aae(actual['AAPL'][0], 100, 3)
aae(actual['MSFT'][0], 100, 3)
aae(actual['C'][0], 100, 3)
aae(actual['AAPL'][9], 91.366, 3)
aae(actual['MSFT'][9], 95.191, 3)
aae(actual['C'][9], 101.199, 3)
def test_to_drawdown_series_ts():
data = ts
actual = data.to_drawdown_series()
assert len(actual) == len(data)
aae(actual[0], 0, 3)
aae(actual[1], -0.019, 3)
aae(actual[9], -0.086, 3)
def test_to_drawdown_series_df():
data = df
actual = data.to_drawdown_series()
assert len(actual) == len(data)
aae(actual['AAPL'][0], 0, 3)
aae(actual['MSFT'][0], 0, 3)
aae(actual['C'][0], 0, 3)
aae(actual['AAPL'][1], -0.019, 3)
aae(actual['MSFT'][1], -0.011, 3)
aae(actual['C'][1], -0.012, 3)
aae(actual['AAPL'][9], -0.086, 3)
aae(actual['MSFT'][9], -0.048, 3)
aae(actual['C'][9], -0.029, 3)
def test_max_drawdown_ts():
data = ts
actual = data.calc_max_drawdown()
aae(actual, -0.086, 3)
def test_max_drawdown_df():
data = df
data = data[0:10]
actual = data.calc_max_drawdown()
aae(actual['AAPL'], -0.086, 3)
aae(actual['MSFT'], -0.048, 3)
aae(actual['C'], -0.033, 3)
def test_year_frac():
actual = ffn.year_frac(pd.to_datetime('2004-03-10'),
pd.to_datetime('2004-03-29'))
# not exactly the same as excel but close enough
aae(actual, 0.0520, 4)
def test_cagr_ts():
data = ts
actual = data.calc_cagr()
aae(actual, -0.921, 3)
def test_cagr_df():
data = df
actual = data.calc_cagr()
aae(actual['AAPL'], 0.440, 3)
aae(actual['MSFT'], 0.041, 3)
aae(actual['C'], -0.205, 3)
def test_merge():
a = pd.Series(index=pd.date_range('2010-01-01', periods=5),
data=100, name='a')
b = pd.Series(index=pd.date_range('2010-01-02', periods=5),
data=200, name='b')
actual = ffn.merge(a, b)
assert 'a' in actual
assert 'b' in actual
assert len(actual) == 6
assert len(actual.columns) == 2
assert np.isnan(actual['a'][-1])
assert np.isnan(actual['b'][0])
assert actual['a'][0] == 100
assert actual['a'][1] == 100
assert actual['b'][-1] == 200
assert actual['b'][1] == 200
old = actual
old.columns = ['c', 'd']
actual = ffn.merge(old, a, b)
assert 'a' in actual
assert 'b' in actual
assert 'c' in actual
assert 'd' in actual
assert len(actual) == 6
assert len(actual.columns) == 4
assert np.isnan(actual['a'][-1])
assert np.isnan(actual['b'][0])
assert actual['a'][0] == 100
assert actual['a'][1] == 100
assert actual['b'][-1] == 200
assert actual['b'][1] == 200
def test_calc_inv_vol_weights():
prc = df.iloc[0:11]
rets = prc.to_returns().dropna()
actual = ffn.core.calc_inv_vol_weights(rets)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.218, 3)
aae(actual['MSFT'], 0.464, 3)
aae(actual['C'], 0.318, 3)
def test_calc_mean_var_weights():
prc = df.iloc[0:11]
rets = prc.to_returns().dropna()
actual = ffn.core.calc_mean_var_weights(rets)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.000, 3)
aae(actual['MSFT'], 0.000, 3)
aae(actual['C'], 1.000, 3)
def test_calc_erc_weights():
prc = df.iloc[0:11]
rets = prc.to_returns().dropna()
actual = ffn.core.calc_erc_weights(rets)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.270, 3)
aae(actual['MSFT'], 0.374, 3)
aae(actual['C'], 0.356, 3)
actual = ffn.core.calc_erc_weights(
rets,
covar_method='ledoit-wolf',
risk_parity_method='slsqp',
tolerance=1e-9
)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.270, 3)
aae(actual['MSFT'], 0.374, 3)
aae(actual['C'], 0.356, 3)
actual = ffn.core.calc_erc_weights(
rets,
covar_method='standard',
risk_parity_method='ccd',
tolerance=1e-9
)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.234, 3)
aae(actual['MSFT'], 0.409, 3)
aae(actual['C'], 0.356, 3)
actual = ffn.core.calc_erc_weights(
rets,
covar_method='standard',
risk_parity_method='slsqp',
tolerance=1e-9
)
assert len(actual) == 3
assert 'AAPL' in actual
assert 'MSFT' in actual
assert 'C' in actual
aae(actual['AAPL'], 0.234, 3)
aae(actual['MSFT'], 0.409, 3)
aae(actual['C'], 0.356, 3)
def test_calc_total_return():
prc = df.iloc[0:11]
actual = prc.calc_total_return()
assert len(actual) == 3
aae(actual['AAPL'], -0.079, 3)
aae(actual['MSFT'], -0.038, 3)
aae(actual['C'], 0.012, 3)
def test_get_num_days_required():
actual = ffn.core.get_num_days_required(pd.DateOffset(months=3),
perc_required=1.)
assert actual >= 60
actual = ffn.core.get_num_days_required(pd.DateOffset(months=3),
perc_required=1.,
period='m')
assert actual >= 3
def test_asfreq_actual():
a = pd.Series({pd.to_datetime('2010-02-27'): 100,
pd.to_datetime('2010-03-25'): 200})
actual = a.asfreq_actual(freq='M', method='ffill')
assert len(actual) == 1
assert '2010-02-27' in actual
def test_to_monthly():
a = pd.Series(range(100), index=pd.date_range(
'2010-01-01', periods=100))
# to test for actual dates
a['2010-01-31'] = np.nan
a = a.dropna()
actual = a.to_monthly()
assert len(actual) == 3
assert '2010-01-30' in actual
assert actual['2010-01-30'] == 29
def test_drop_duplicate_cols():
a = pd.Series(index=pd.date_range('2010-01-01', periods=5),
data=100, name='a')
# second version of a w/ less data
a2 = pd.Series(index=pd.date_range('2010-01-02', periods=4),
data=900, name='a')
b = pd.Series(index=pd.date_range('2010-01-02', periods=5),
data=200, name='b')
actual = ffn.merge(a, a2, b)
assert actual['a'].shape[1] == 2
assert len(actual.columns) == 3
actual = actual.drop_duplicate_cols()
assert len(actual.columns) == 2
assert 'a' in actual
assert 'b' in actual
assert len(actual['a'].dropna()) == 5
def test_limit_weights():
w = {'a': 0.3, 'b': 0.1,
'c': 0.05, 'd': 0.05, 'e': 0.5}
actual_exp = {'a': 0.3, 'b': 0.2, 'c': 0.1,
'd': 0.1, 'e': 0.3}
actual = ffn.core.limit_weights(w, 0.3)
assert actual.sum() == 1.0
for k in actual_exp:
assert actual[k] == actual_exp[k]
w = pd.Series(w)
actual = ffn.core.limit_weights(w, 0.3)
assert actual.sum() == 1.0
for k in actual_exp:
assert actual[k] == actual_exp[k]
w = pd.Series({'a': 0.29, 'b': 0.1,
'c': 0.06, 'd': 0.05, 'e': 0.5})
assert w.sum() == 1.0
actual = ffn.core.limit_weights(w, 0.3)
assert actual.sum() == 1.0
assert all(x <= 0.3 for x in actual)
aae(actual['a'], 0.300, 3)
aae(actual['b'], 0.190, 3)
aae(actual['c'], 0.114, 3)
aae(actual['d'], 0.095, 3)
aae(actual['e'], 0.300, 3)
def test_random_weights():
n = 10
bounds = (0., 1.)
tot = 1.0000
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(lambda x: (x >= low and x <= high)).all().all()
n = 4
bounds = (0., 0.25)
tot = 1.0000
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(
lambda x: (np.round(x, 2) >= low and
np.round(x, 2) <= high)).all().all()
n = 7
bounds = (0., 0.25)
tot = 0.8000
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(
lambda x: (np.round(x, 2) >= low and
np.round(x, 2) <= high)).all().all()
n = 10
bounds = (-.25, 0.25)
tot = 0.0
low = bounds[0]
high = bounds[1]
df = pd.DataFrame(index=range(1000), columns=range(n))
for i in df.index:
df.loc[i] = ffn.random_weights(n, bounds, tot)
assert df.sum(axis=1).apply(lambda x: np.round(x, 4) == tot).all()
assert df.applymap(
lambda x: (np.round(x, 2) >= low and
np.round(x, 2) <= high)).all().all()
def test_random_weights_throws_error():
try:
ffn.random_weights(2, (0., 0.25), 1.0)
assert False
except ValueError:
assert True
try:
ffn.random_weights(10, (0.5, 0.25), 1.0)
assert False
except ValueError:
assert True
try:
ffn.random_weights(10, (0.5, 0.75), 0.2)
assert False
except ValueError:
assert True
def test_rollapply():
a = | pd.Series([1, 2, 3, 4, 5]) | pandas.Series |
import numpy as np
import pytest
from pandas import Series
import pandas._testing as tm
def no_nans(x):
return x.notna().all().all()
def all_na(x):
return x.isnull().all().all()
@pytest.fixture(params=[(1, 0), (5, 1)])
def rolling_consistency_cases(request):
"""window, min_periods"""
return request.param
@pytest.mark.parametrize("f", [lambda v: Series(v).sum(), np.nansum, np.sum])
def test_rolling_apply_consistency_sum(
request, all_data, rolling_consistency_cases, center, f
):
window, min_periods = rolling_consistency_cases
if f is np.sum:
if not no_nans(all_data) and not (
all_na(all_data) and not all_data.empty and min_periods > 0
):
request.node.add_marker(
pytest.mark.xfail(reason="np.sum has different behavior with NaNs")
)
rolling_f_result = all_data.rolling(
window=window, min_periods=min_periods, center=center
).sum()
rolling_apply_f_result = all_data.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
| tm.assert_equal(rolling_f_result, rolling_apply_f_result) | pandas._testing.assert_equal |
import operator
import re
import warnings
import numpy as np
import pytest
from pandas._libs.sparse import IntIndex
import pandas.util._test_decorators as td
import pandas as pd
from pandas import isna
from pandas.core.sparse.api import SparseArray, SparseDtype, SparseSeries
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
@pytest.fixture(params=["integer", "block"])
def kind(request):
return request.param
class TestSparseArray:
def setup_method(self, method):
self.arr_data = np.array([np.nan, np.nan, 1, 2, 3,
np.nan, 4, 5, np.nan, 6])
self.arr = SparseArray(self.arr_data)
self.zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
def test_constructor_dtype(self):
arr = SparseArray([np.nan, 1, 2, np.nan])
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert arr.dtype.subtype == np.float64
assert np.isnan(arr.fill_value)
arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0)
assert arr.dtype == SparseDtype(np.float64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=np.float64)
assert arr.dtype == SparseDtype(np.float64, np.nan)
assert np.isnan(arr.fill_value)
arr = SparseArray([0, 1, 2, 4], dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None)
assert arr.dtype == SparseDtype(np.int64, 0)
assert arr.fill_value == 0
def test_constructor_dtype_str(self):
result = SparseArray([1, 2, 3], dtype='int')
expected = SparseArray([1, 2, 3], dtype=int)
tm.assert_sp_array_equal(result, expected)
def test_constructor_sparse_dtype(self):
result = SparseArray([1, 0, 0, 1], dtype=SparseDtype('int64', -1))
expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int64')
def test_constructor_sparse_dtype_str(self):
result = SparseArray([1, 0, 0, 1], dtype='Sparse[int32]')
expected = SparseArray([1, 0, 0, 1], dtype=np.int32)
tm.assert_sp_array_equal(result, expected)
assert result.sp_values.dtype == np.dtype('int32')
def test_constructor_object_dtype(self):
# GH 11856
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object)
assert arr.dtype == SparseDtype(np.object)
assert np.isnan(arr.fill_value)
arr = SparseArray(['A', 'A', np.nan, 'B'], dtype=np.object,
fill_value='A')
assert arr.dtype == SparseDtype(np.object, 'A')
assert arr.fill_value == 'A'
# GH 17574
data = [False, 0, 100.0, 0.0]
arr = SparseArray(data, dtype=np.object, fill_value=False)
assert arr.dtype == SparseDtype(np.object, False)
assert arr.fill_value is False
arr_expected = np.array(data, dtype=np.object)
it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected))
assert np.fromiter(it, dtype=np.bool).all()
@pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int])
def test_constructor_na_dtype(self, dtype):
with pytest.raises(ValueError, match="Cannot convert"):
SparseArray([0, 1, np.nan], dtype=dtype)
def test_constructor_spindex_dtype(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]))
# XXX: Behavior change: specifying SparseIndex no longer changes the
# fill_value
expected = SparseArray([0, 1, 2, 0], kind='integer')
tm.assert_sp_array_equal(arr, expected)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=np.int64, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=np.int64)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=[1, 2, 3],
sparse_index=IntIndex(4, [1, 2, 3]),
dtype=None, fill_value=0)
exp = SparseArray([0, 1, 2, 3], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize("sparse_index", [
None, IntIndex(1, [0]),
])
def test_constructor_spindex_dtype_scalar(self, sparse_index):
# scalar input
arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None)
exp = SparseArray([1], dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
def test_constructor_spindex_dtype_scalar_broadcasts(self):
arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2]),
fill_value=0, dtype=None)
exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == SparseDtype(np.int64)
assert arr.fill_value == 0
@pytest.mark.parametrize('data, fill_value', [
(np.array([1, 2]), 0),
(np.array([1.0, 2.0]), np.nan),
([True, False], False),
([pd.Timestamp('2017-01-01')], pd.NaT),
])
def test_constructor_inferred_fill_value(self, data, fill_value):
result = SparseArray(data).fill_value
if pd.isna(fill_value):
assert pd.isna(result)
else:
assert result == fill_value
@pytest.mark.parametrize('format', ['coo', 'csc', 'csr'])
@pytest.mark.parametrize('size', [
pytest.param(0,
marks=td.skip_if_np_lt("1.16",
reason='NumPy-11383')),
10
])
@td.skip_if_no_scipy
def test_from_spmatrix(self, size, format):
import scipy.sparse
mat = scipy.sparse.random(size, 1, density=0.5, format=format)
result = SparseArray.from_spmatrix(mat)
result = np.asarray(result)
expected = mat.toarray().ravel()
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no_scipy
def test_from_spmatrix_raises(self):
import scipy.sparse
mat = scipy.sparse.eye(5, 4, format='csc')
with pytest.raises(ValueError, match="not '4'"):
SparseArray.from_spmatrix(mat)
@pytest.mark.parametrize('scalar,dtype', [
(False, SparseDtype(bool, False)),
(0.0, SparseDtype('float64', 0)),
(1, SparseDtype('int64', 1)),
('z', SparseDtype('object', 'z'))])
def test_scalar_with_index_infer_dtype(self, scalar, dtype):
# GH 19163
arr = SparseArray(scalar, index=[1, 2, 3], fill_value=scalar)
exp = SparseArray([scalar, scalar, scalar], fill_value=scalar)
tm.assert_sp_array_equal(arr, exp)
assert arr.dtype == dtype
assert exp.dtype == dtype
@pytest.mark.parametrize("fill", [1, np.nan, 0])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip(self, kind, fill):
# see gh-13999
arr = SparseArray([np.nan, 1, np.nan, 2, 3],
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
arr = SparseArray([0, 0, 0, 1, 1, 2], dtype=np.int64,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr), dtype=np.int64)
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
@pytest.mark.parametrize("fill", [True, False, np.nan])
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
def test_sparse_series_round_trip2(self, kind, fill):
# see gh-13999
arr = SparseArray([True, False, True, True], dtype=np.bool,
kind=kind, fill_value=fill)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
res = SparseArray(SparseSeries(arr))
tm.assert_sp_array_equal(arr, res)
def test_get_item(self):
assert np.isnan(self.arr[1])
assert self.arr[2] == 1
assert self.arr[7] == 5
assert self.zarr[0] == 0
assert self.zarr[2] == 1
assert self.zarr[7] == 5
errmsg = re.compile("bounds")
with pytest.raises(IndexError, match=errmsg):
self.arr[11]
with pytest.raises(IndexError, match=errmsg):
self.arr[-11]
assert self.arr[-1] == self.arr[len(self.arr) - 1]
def test_take_scalar_raises(self):
msg = "'indices' must be an array, not a scalar '2'."
with pytest.raises(ValueError, match=msg):
self.arr.take(2)
def test_take(self):
exp = SparseArray(np.take(self.arr_data, [2, 3]))
tm.assert_sp_array_equal(self.arr.take([2, 3]), exp)
exp = SparseArray(np.take(self.arr_data, [0, 1, 2]))
tm.assert_sp_array_equal(self.arr.take([0, 1, 2]), exp)
def test_take_fill_value(self):
data = np.array([1, np.nan, 0, 3, 0])
sparse = SparseArray(data, fill_value=0)
exp = SparseArray(np.take(data, [0]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([0]), exp)
exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0)
tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp)
def test_take_negative(self):
exp = SparseArray(np.take(self.arr_data, [-1]))
tm.assert_sp_array_equal(self.arr.take([-1]), exp)
exp = SparseArray(np.take(self.arr_data, [-4, -3, -2]))
tm.assert_sp_array_equal(self.arr.take([-4, -3, -2]), exp)
@pytest.mark.parametrize('fill_value', [0, None, np.nan])
def test_shift_fill_value(self, fill_value):
# GH #24128
sparse = SparseArray(np.array([1, 0, 0, 3, 0]),
fill_value=8.0)
res = sparse.shift(1, fill_value=fill_value)
if isna(fill_value):
fill_value = res.dtype.na_value
exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]),
fill_value=8.0)
tm.assert_sp_array_equal(res, exp)
def test_bad_take(self):
with pytest.raises(IndexError, match="bounds"):
self.arr.take([11])
def test_take_filling(self):
# similar tests as GH 12631
sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4])
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
# XXX: test change: fill_value=True -> allow_fill=True
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
expected = SparseArray([np.nan, np.nan, np.nan])
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([np.nan, np.nan, 4])
tm.assert_sp_array_equal(result, expected)
msg = "Invalid value in 'indices'"
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), allow_fill=True)
def test_take_filling_fill_value(self):
# same tests as GH 12631
sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0)
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# fill_value
result = sparse.take(np.array([1, 0, -1]), allow_fill=True)
# XXX: behavior change.
# the old way of filling self.fill_value doesn't follow EA rules.
# It's supposed to be self.dtype.na_value (nan in this case)
expected = SparseArray([0, np.nan, np.nan], fill_value=0)
tm.assert_sp_array_equal(result, expected)
# allow_fill=False
result = sparse.take(np.array([1, 0, -1]),
allow_fill=False, fill_value=True)
expected = SparseArray([0, np.nan, 4], fill_value=0)
tm.assert_sp_array_equal(result, expected)
msg = ("Invalid value in 'indices'.")
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -2]), allow_fill=True)
with pytest.raises(ValueError, match=msg):
sparse.take(np.array([1, 0, -5]), allow_fill=True)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_take_filling_all_nan(self):
sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan])
# XXX: did the default kind from take change?
result = sparse.take(np.array([1, 0, -1]))
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
result = sparse.take(np.array([1, 0, -1]), fill_value=True)
expected = SparseArray([np.nan, np.nan, np.nan], kind='block')
tm.assert_sp_array_equal(result, expected)
with pytest.raises(IndexError):
sparse.take(np.array([1, -6]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]))
with pytest.raises(IndexError):
sparse.take(np.array([1, 5]), fill_value=True)
def test_set_item(self):
def setitem():
self.arr[5] = 3
def setslice():
self.arr[1:5] = 2
with pytest.raises(TypeError, match="assignment via setitem"):
setitem()
with pytest.raises(TypeError, match="assignment via setitem"):
setslice()
def test_constructor_from_too_large_array(self):
with pytest.raises(TypeError, match="expected dimension <= 1 data"):
SparseArray(np.arange(10).reshape((2, 5)))
def test_constructor_from_sparse(self):
res = SparseArray(self.zarr)
assert res.fill_value == 0
assert_almost_equal(res.sp_values, self.zarr.sp_values)
def test_constructor_copy(self):
cp = SparseArray(self.arr, copy=True)
cp.sp_values[:3] = 0
assert not (self.arr.sp_values[:3] == 0).any()
not_copy = SparseArray(self.arr)
not_copy.sp_values[:3] = 0
assert (self.arr.sp_values[:3] == 0).all()
def test_constructor_bool(self):
# GH 10648
data = np.array([False, False, True, True, False, False])
arr = SparseArray(data, fill_value=False, dtype=bool)
assert arr.dtype == SparseDtype(bool)
tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True]))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([2, 3], np.int32))
dense = arr.to_dense()
assert dense.dtype == bool
tm.assert_numpy_array_equal(dense, data)
def test_constructor_bool_fill_value(self):
arr = SparseArray([True, False, True], dtype=None)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool)
assert arr.dtype == SparseDtype(np.bool)
assert not arr.fill_value
arr = SparseArray([True, False, True], dtype=np.bool, fill_value=True)
assert arr.dtype == SparseDtype(np.bool, True)
assert arr.fill_value
def test_constructor_float32(self):
# GH 10648
data = np.array([1., np.nan, 3], dtype=np.float32)
arr = SparseArray(data, dtype=np.float32)
assert arr.dtype == SparseDtype(np.float32)
tm.assert_numpy_array_equal(arr.sp_values,
np.array([1, 3], dtype=np.float32))
# Behavior change: np.asarray densifies.
# tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr))
tm.assert_numpy_array_equal(arr.sp_index.indices,
np.array([0, 2], dtype=np.int32))
dense = arr.to_dense()
assert dense.dtype == np.float32
tm.assert_numpy_array_equal(dense, data)
def test_astype(self):
# float -> float
arr = SparseArray([None, None, 0, 2])
result = arr.astype("Sparse[float32]")
expected = SparseArray([None, None, 0, 2], dtype=np.dtype('float32'))
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("float64", fill_value=0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0., 2.],
dtype=dtype.subtype),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
dtype = SparseDtype("int64", 0)
result = arr.astype(dtype)
expected = SparseArray._simple_new(np.array([0, 2], dtype=np.int64),
IntIndex(4, [2, 3]),
dtype)
tm.assert_sp_array_equal(result, expected)
arr = SparseArray([0, np.nan, 0, 1], fill_value=0)
with pytest.raises(ValueError, match='NA'):
arr.astype('Sparse[i8]')
def test_astype_bool(self):
a = pd.SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0))
result = a.astype(bool)
expected = SparseArray([True, 0, 0, True],
dtype=SparseDtype(bool, 0))
tm.assert_sp_array_equal(result, expected)
# update fill value
result = a.astype(SparseDtype(bool, False))
expected = SparseArray([True, False, False, True],
dtype=SparseDtype(bool, False))
tm.assert_sp_array_equal(result, expected)
def test_astype_all(self, any_real_dtype):
vals = np.array([1, 2, 3])
arr = SparseArray(vals, fill_value=1)
typ = np.dtype(any_real_dtype)
res = arr.astype(typ)
assert res.dtype == SparseDtype(typ, 1)
assert res.sp_values.dtype == typ
tm.assert_numpy_array_equal(np.asarray(res.to_dense()),
vals.astype(typ))
@pytest.mark.parametrize('array, dtype, expected', [
(SparseArray([0, 1]), 'float',
SparseArray([0., 1.], dtype=SparseDtype(float, 0.0))),
(SparseArray([0, 1]), bool, SparseArray([False, True])),
(SparseArray([0, 1], fill_value=1), bool,
SparseArray([False, True], dtype=SparseDtype(bool, True))),
pytest.param(
SparseArray([0, 1]), 'datetime64[ns]',
SparseArray(np.array([0, 1], dtype='datetime64[ns]'),
dtype=SparseDtype('datetime64[ns]',
pd.Timestamp('1970'))),
marks=[pytest.mark.xfail(reason="NumPy-7619")],
),
(SparseArray([0, 1, 10]), str,
SparseArray(['0', '1', '10'], dtype=SparseDtype(str, '0'))),
(SparseArray(['10', '20']), float, SparseArray([10.0, 20.0])),
(SparseArray([0, 1, 0]), object,
SparseArray([0, 1, 0], dtype=SparseDtype(object, 0))),
])
def test_astype_more(self, array, dtype, expected):
result = array.astype(dtype)
tm.assert_sp_array_equal(result, expected)
def test_astype_nan_raises(self):
arr = SparseArray([1.0, np.nan])
with pytest.raises(ValueError, match='Cannot convert non-finite'):
arr.astype(int)
def test_set_fill_value(self):
arr = SparseArray([1., np.nan, 2.], fill_value=np.nan)
arr.fill_value = 2
assert arr.fill_value == 2
arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64)
arr.fill_value = 2
assert arr.fill_value == 2
# XXX: this seems fine? You can construct an integer
# sparsearray with NaN fill value, why not update one?
# coerces to int
# msg = "unable to set fill_value 3\\.1 to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 3.1
assert arr.fill_value == 3.1
# msg = "unable to set fill_value nan to int64 dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
arr.fill_value = True
assert arr.fill_value
# coerces to bool
# msg = "unable to set fill_value 0 to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = 0
assert arr.fill_value == 0
# msg = "unable to set fill_value nan to bool dtype"
# with pytest.raises(ValueError, match=msg):
arr.fill_value = np.nan
assert np.isnan(arr.fill_value)
@pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)])
def test_set_fill_invalid_non_scalar(self, val):
arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool)
msg = "fill_value must be a scalar"
with pytest.raises(ValueError, match=msg):
arr.fill_value = val
def test_copy(self):
arr2 = self.arr.copy()
assert arr2.sp_values is not self.arr.sp_values
assert arr2.sp_index is self.arr.sp_index
def test_values_asarray(self):
assert_almost_equal(self.arr.to_dense(), self.arr_data)
@pytest.mark.parametrize('data,shape,dtype', [
([0, 0, 0, 0, 0], (5,), None),
([], (0,), None),
([0], (1,), None),
(['A', 'A', np.nan, 'B'], (4,), np.object)
])
def test_shape(self, data, shape, dtype):
# GH 21126
out = SparseArray(data, dtype=dtype)
assert out.shape == shape
@pytest.mark.parametrize("vals", [
[np.nan, np.nan, np.nan, np.nan, np.nan],
[1, np.nan, np.nan, 3, np.nan],
[1, np.nan, 0, 3, 0],
])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_dense_repr(self, vals, fill_value):
vals = np.array(vals)
arr = SparseArray(vals, fill_value=fill_value)
res = arr.to_dense()
tm.assert_numpy_array_equal(res, vals)
with tm.assert_produces_warning(FutureWarning):
res2 = arr.get_values()
tm.assert_numpy_array_equal(res2, vals)
def test_getitem(self):
def _checkit(i):
assert_almost_equal(self.arr[i], self.arr.to_dense()[i])
for i in range(len(self.arr)):
_checkit(i)
_checkit(-i)
def test_getitem_arraylike_mask(self):
arr = SparseArray([0, 1, 2])
result = arr[[True, False, True]]
expected = SparseArray([0, 2])
tm.assert_sp_array_equal(result, expected)
def test_getslice(self):
result = self.arr[:-3]
exp = SparseArray(self.arr.to_dense()[:-3])
tm.assert_sp_array_equal(result, exp)
result = self.arr[-4:]
exp = SparseArray(self.arr.to_dense()[-4:])
tm.assert_sp_array_equal(result, exp)
# two corner cases from Series
result = self.arr[-12:]
exp = SparseArray(self.arr)
tm.assert_sp_array_equal(result, exp)
result = self.arr[:-12]
exp = SparseArray(self.arr.to_dense()[:0])
tm.assert_sp_array_equal(result, exp)
def test_getslice_tuple(self):
dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0])
sparse = SparseArray(dense)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ])
tm.assert_sp_array_equal(res, exp)
sparse = SparseArray(dense, fill_value=0)
res = sparse[4:, ]
exp = SparseArray(dense[4:, ], fill_value=0)
tm.assert_sp_array_equal(res, exp)
with pytest.raises(IndexError):
sparse[4:, :]
with pytest.raises(IndexError):
# check numpy compat
dense[4:, :]
def test_boolean_slice_empty(self):
arr = pd.SparseArray([0, 1, 2])
res = arr[[False, False, False]]
assert res.dtype == arr.dtype
@pytest.mark.parametrize("op", ["add", "sub", "mul",
"truediv", "floordiv", "pow"])
def test_binary_operators(self, op):
op = getattr(operator, op)
data1 = np.random.randn(20)
data2 = np.random.randn(20)
data1[::2] = np.nan
data2[::3] = np.nan
arr1 = SparseArray(data1)
arr2 = SparseArray(data2)
data1[::2] = 3
data2[::3] = 3
farr1 = SparseArray(data1, fill_value=3)
farr2 = SparseArray(data2, fill_value=3)
def _check_op(op, first, second):
res = op(first, second)
exp = SparseArray(op(first.to_dense(), second.to_dense()),
fill_value=first.fill_value)
assert isinstance(res, SparseArray)
assert_almost_equal(res.to_dense(), exp.to_dense())
res2 = op(first, second.to_dense())
assert isinstance(res2, SparseArray)
tm.assert_sp_array_equal(res, res2)
res3 = op(first.to_dense(), second)
assert isinstance(res3, SparseArray)
tm.assert_sp_array_equal(res, res3)
res4 = op(first, 4)
assert isinstance(res4, SparseArray)
# Ignore this if the actual op raises (e.g. pow).
try:
exp = op(first.to_dense(), 4)
exp_fv = op(first.fill_value, 4)
except ValueError:
pass
else:
assert_almost_equal(res4.fill_value, exp_fv)
assert_almost_equal(res4.to_dense(), exp)
with np.errstate(all="ignore"):
for first_arr, second_arr in [(arr1, arr2), (farr1, farr2)]:
_check_op(op, first_arr, second_arr)
def test_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
tm.assert_sp_array_equal(unpickled, obj)
_check_roundtrip(self.arr)
_check_roundtrip(self.zarr)
def test_generator_warnings(self):
sp_arr = | SparseArray([1, 2, 3]) | pandas.core.sparse.api.SparseArray |
from . import wrapper_double, wrapper_float
import numpy as np, pandas as pd
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix, issparse, isspmatrix_coo, isspmatrix_csr, isspmatrix_csc
import multiprocessing
import ctypes
import warnings
__all__ = ["CMF", "CMF_implicit",
"OMF_explicit", "OMF_implicit",
"MostPopular", "ContentBased",
"CMF_imputer"]
### TODO: this module should move from doing operations in Python to
### using the new designated C functions for each type of prediction.
### TODO: eliminate the hard dependency on pandas.
class _CMF:
def __repr__(self):
return self.__str__()
def set_params(self, **params):
"""
Set the parameters of this estimator.
Kept for compatibility with scikit-learn.
Note
----
Setting any parameter that is related to model hyperparameters (i.e. anything not
related to verbosity or number of threads) will reset the model - that is,
it will no longer be possible to use it for predictions without a new refit.
Parameters
----------
**params : dict
Estimator parameters.
Returns
-------
self : estimator instance
Estimator instance.
"""
if not params:
return self
valid_params = self.get_params()
for k,v in params.items():
if k not in valid_params.keys():
raise ValueError("Invalid parameter %s" % k)
else:
                if k not in ["verbose", "nthreads", "n_jobs", "print_every", "handle_interrupt", "random_state"]:
self.is_fitted_ = False
setattr(self, k, v)
return self
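    # Usage sketch (hypothetical, assuming an already-fitted model instance `m`):
    #   m.set_params(verbose=False, nthreads=4)   # runtime-only options -> m.is_fitted_ stays True
    #   m.set_params(k=64, lambda_=1e1)           # hyperparameters change -> m.is_fitted_ reset to False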
def _take_params(self, implicit=False, alpha=40., downweight=False,
apply_log_transf=False,
nonneg=False, nonneg_C=False, nonneg_D=False,
max_cd_steps=100,
k=50, lambda_=1e2, method="als", add_implicit_features=False,
scale_lam=False, scale_lam_sideinfo=False, scale_bias_const=False,
use_cg=False, max_cg_steps=3, finalize_chol=False,
user_bias=True, item_bias=True, center=False,
k_user=0, k_item=0, k_main=0,
w_main=1., w_user=1., w_item=1., w_implicit=0.5,
l1_lambda=0., center_U=True, center_I=True,
maxiter=400, niter=10, parallelize="separate", corr_pairs=4,
NA_as_zero=False, NA_as_zero_user=False, NA_as_zero_item=False,
precompute_for_predictions=True, use_float=False,
random_state=1, verbose=True,
print_every=10, handle_interrupt=True,
produce_dicts=False, nthreads=-1, n_jobs=None):
assert method in ["als", "lbfgs"]
assert parallelize in ["separate", "single"]
k = int(k) if isinstance(k, float) else k
k_user = int(k_user) if isinstance(k_user, float) else k_user
k_item = int(k_item) if isinstance(k_item, float) else k_item
k_main = int(k_main) if isinstance(k_main, float) else k_main
if not isinstance(self, OMF_explicit):
assert isinstance(k, int) and k > 0
else:
assert isinstance(k, int) and k >= 0
assert isinstance(k_user, int) and k_user >= 0
assert isinstance(k_item, int) and k_item >= 0
assert isinstance(k_main, int) and k_main >= 0
if ((max(k_user, k_item) + k + k_main + max(user_bias, item_bias))**2) > np.iinfo(ctypes.c_int).max:
raise ValueError("Number of factors is too large.")
lambda_ = float(lambda_) if isinstance(lambda_, int) else lambda_
if (isinstance(lambda_, list) or isinstance(lambda_, tuple) or isinstance(lambda_, pd.Series)):
lambda_ = np.array(lambda_)
if isinstance(lambda_, np.ndarray):
lambda_ = lambda_.reshape(-1)
assert lambda_.shape[0] == 6
assert np.all(lambda_ >= 0.)
else:
assert isinstance(lambda_, float) and lambda_ >= 0.
l1_lambda = float(l1_lambda) if isinstance(l1_lambda, int) else l1_lambda
if (isinstance(l1_lambda, list) or isinstance(l1_lambda, tuple) or isinstance(l1_lambda, pd.Series)):
l1_lambda = np.array(l1_lambda)
if isinstance(l1_lambda, np.ndarray):
l1_lambda = l1_lambda.reshape(-1)
assert l1_lambda.shape[0] == 6
assert np.all(l1_lambda >= 0.)
else:
assert isinstance(l1_lambda, float) and l1_lambda >= 0.
niter = int(niter) if isinstance(niter, float) else niter
assert isinstance(niter, int) and niter >= 0
if not implicit and method == "lbfgs":
maxiter = int(maxiter) if isinstance(maxiter, float) else maxiter
assert isinstance(maxiter, int) and maxiter >= 0
if n_jobs is not None:
nthreads = n_jobs
        if nthreads is None:
            nthreads = 1
        if nthreads < 1:
            nthreads = multiprocessing.cpu_count() + 1 - nthreads
assert isinstance(nthreads, int) and nthreads > 0
if (nthreads > 1) and (not wrapper_double._get_has_openmp()):
msg_omp = "Attempting to use more than 1 thread, but "
msg_omp += "package was built without multi-threading "
msg_omp += "support - see the project's GitHub page for "
msg_omp += "more information."
warnings.warn(msg_omp)
if not implicit and method == "lbfgs":
print_every = int(print_every) if isinstance(print_every, float) else print_every
assert isinstance(print_every, int) and print_every >= 0
if not implicit and method == "lbfgs":
corr_pairs = int(corr_pairs) if isinstance(corr_pairs, float) else corr_pairs
assert isinstance(corr_pairs, int) and corr_pairs >= 2
if random_state is None:
            random_state = np.random.default_rng()
if isinstance(random_state, np.random.RandomState):
random_state = random_state.randint(np.iinfo(np.int32).max)
elif isinstance(random_state, np.random.Generator):
random_state = random_state.integers(np.iinfo(np.int32).max)
if (method == "lbfgs"):
if (NA_as_zero or NA_as_zero_user or NA_as_zero_item):
raise ValueError("Option 'NA_as_zero' not supported with method='lbfgs'.")
if add_implicit_features:
raise ValueError("Option 'add_implicit_features' not supported with method='lbfgs'.")
if (nonneg) or (nonneg_C) or (nonneg_D):
raise ValueError("non-negativity constraints not supported with method='lbfgs'.")
if (scale_lam) or (scale_lam_sideinfo):
raise ValueError("'scale_lam' not supported with method='lbfgs'.")
if l1_lambda != 0.:
raise ValueError("L1 regularization not supported with method='lbfgs'.")
if method == "als":
assert max_cg_steps > 0
if max_cd_steps is None:
max_cd_steps = 0
if isinstance(max_cd_steps, float):
max_cd_steps = int(max_cd_steps)
assert max_cd_steps >= 0
assert isinstance(max_cd_steps, int)
w_main = float(w_main) if isinstance(w_main, int) else w_main
w_user = float(w_user) if isinstance(w_user, int) else w_user
w_item = float(w_item) if isinstance(w_item, int) else w_item
w_implicit = float(w_implicit) if isinstance(w_implicit, int) else w_implicit
assert isinstance(w_main, float) and w_main > 0
assert isinstance(w_user, float) and w_user > 0
assert isinstance(w_item, float) and w_item > 0
assert isinstance(w_implicit, float) and w_implicit > 0
if implicit:
alpha = float(alpha) if isinstance(alpha, int) else alpha
assert isinstance(alpha, float) and alpha > 0.
if (center and nonneg):
warnings.warn("Warning: will fit a model with centering and non-negativity constraints.")
if (center_U and nonneg_C):
warnings.warn("Warning: will fit a model with centering in 'U' and non-negativity constraints in 'C'.")
if (center_I and nonneg_D):
warnings.warn("Warning: will fit a model with centering in 'I' and non-negativity constraints in 'D'.")
if (NA_as_zero and add_implicit_features):
warnings.warn("Warning: will add implicit features while having 'NA_as_zero'.")
self.k = k
self.k_user = k_user
self.k_item = k_item
self.k_main = k_main
self.lambda_ = lambda_
self.l1_lambda = l1_lambda
self.scale_lam = bool(scale_lam)
self.scale_lam_sideinfo = bool(scale_lam_sideinfo) or self.scale_lam
self.scale_bias_const = bool(scale_bias_const)
self.alpha = alpha
self.w_main = w_main
self.w_user = w_user
self.w_item = w_item
self.w_implicit = w_implicit
self.downweight = bool(downweight)
self.user_bias = bool(user_bias)
self.item_bias = bool(item_bias)
self.center = bool(center) and not bool(implicit)
self.center_U = bool(center_U)
self.center_I = bool(center_I)
self.method = method
self.add_implicit_features = bool(add_implicit_features)
self.apply_log_transf = bool(apply_log_transf)
self.use_cg = bool(use_cg)
self.max_cg_steps = int(max_cg_steps)
self.max_cd_steps = int(max_cd_steps)
self.finalize_chol = bool(finalize_chol)
self.maxiter = maxiter
self.niter = niter
self.parallelize = parallelize
self.NA_as_zero = bool(NA_as_zero)
self.NA_as_zero_user = bool(NA_as_zero_user)
self.NA_as_zero_item = bool(NA_as_zero_item)
self.nonneg = bool(nonneg)
self.nonneg_C = bool(nonneg_C)
self.nonneg_D = bool(nonneg_D)
self.precompute_for_predictions = bool(precompute_for_predictions)
self.include_all_X = True
self.use_float = bool(use_float)
self.verbose = bool(verbose)
self.print_every = print_every
self.corr_pairs = corr_pairs
self.random_state = int(random_state)
self.produce_dicts = bool(produce_dicts)
self.handle_interrupt = bool(handle_interrupt)
self.nthreads = nthreads
self._implicit = bool(implicit)
self.dtype_ = ctypes.c_float if use_float else ctypes.c_double
self._k_pred = k
self._k_main_col = self.k_main
if isinstance(self.lambda_, np.ndarray):
if self.lambda_.dtype != self.dtype_:
self.lambda_ = self.lambda_.astype(self.dtype_)
if isinstance(self.l1_lambda, np.ndarray):
if self.l1_lambda.dtype != self.dtype_:
self.l1_lambda = self.l1_lambda.astype(self.dtype_)
self._reset()
def _reset(self):
self.A_ = np.empty((0,0), dtype=self.dtype_)
self.B_ = np.empty((0,0), dtype=self.dtype_)
self.C_ = np.empty((0,0), dtype=self.dtype_)
self.D_ = np.empty((0,0), dtype=self.dtype_)
self.Cbin_ = np.empty((0,0), dtype=self.dtype_)
self.Dbin_ = np.empty((0,0), dtype=self.dtype_)
self.Ai_ = np.empty((0,0), dtype=self.dtype_)
self.Bi_ = np.empty((0,0), dtype=self.dtype_)
self.user_bias_ = np.empty(0, dtype=self.dtype_)
self.item_bias_ = np.empty(0, dtype=self.dtype_)
self.scaling_biasA_ = 0.
self.scaling_biasB_ = 0.
self.C_bias_ = np.empty(0, dtype=self.dtype_)
self.D_bias_ = np.empty(0, dtype=self.dtype_)
self.glob_mean_ = 0.
self._TransBtBinvBt = np.empty((0,0), dtype=self.dtype_)
## will have lambda added for implicit but not for explicit, dim is k+k_main
self._BtB = np.empty((0,0), dtype=self.dtype_)
self._BtXbias = np.empty(0, dtype=self.dtype_)
self._TransCtCinvCt = np.empty((0,0), dtype=self.dtype_)
## will be multiplied by w_user already
self._CtC = np.empty((0,0), dtype=self.dtype_)
self._BeTBe = np.empty((0,0), dtype=self.dtype_)
self._BeTBeChol = np.empty((0,0), dtype=self.dtype_)
self._BiTBi = np.empty((0,0), dtype=self.dtype_)
self._CtUbias = np.empty(0, dtype=self.dtype_)
self._A_pred = np.empty((0,0), dtype=self.dtype_)
self._B_pred = np.empty((0,0), dtype=self.dtype_)
self._B_plus_bias = np.empty((0,0), dtype=self.dtype_)
self._U_cols = np.empty(0, dtype=object)
self._I_cols = np.empty(0, dtype=object)
self._Ub_cols = np.empty(0, dtype=object)
self._Ib_cols = np.empty(0, dtype=object)
self._U_colmeans = np.empty(0, dtype=self.dtype_)
self._I_colmeans = np.empty(0, dtype=self.dtype_)
self._w_main_multiplier = 1.
self.is_fitted_ = False
self._only_prediction_info = False
self.nfev_ = None
self.nupd_ = None
self.user_mapping_ = np.array([], dtype=object)
self.item_mapping_ = np.array([], dtype=object)
self.reindex_ = False
self.user_dict_ = dict()
self.item_dict_ = dict()
def _take_params_offsets(self, k_sec=0, k_main=0, add_intercepts=True):
k_sec = int(k_sec) if isinstance(k_sec, float) else k_sec
k_main = int(k_main) if isinstance(k_main, float) else k_main
assert isinstance(k_sec, int) and k_sec >= 0
assert isinstance(k_main, int) and k_main >= 0
if ((max(k_sec, k_main) + self.k)**2 + 1) > np.iinfo(ctypes.c_int).max:
raise ValueError("Number of factors is too large.")
if self.method == "als":
if self._implicit:
msg = " not supported for implicit-feedback."
else:
msg = " not supported with method='als'."
if k_sec > 0 or k_main > 0:
raise ValueError("'k_sec' and 'k_main'" + msg)
if isinstance(self.lambda_, np.ndarray):
raise ValueError("Different regularization for each parameter is" + msg)
if self.w_user != 1. or self.w_item != 1.:
raise ValueError("'w_user' and 'w_main' are" + msg)
self.k_sec = k_sec
self.k_main = k_main
self._k_pred = self.k_sec + self.k + self.k_main
self._k_main_col = 0
self.add_intercepts = bool(add_intercepts)
def _append_NAs(self, U, m_u, p, append_U):
U_new = np.repeat(np.nan, m_u*p).reshape((m_u, p))
        if U_new.dtype != self.dtype_:
            U_new = U_new.astype(self.dtype_)
        if not U_new.flags["C_CONTIGUOUS"]:
            U_new = np.ascontiguousarray(U_new)
        U_new[np.setdiff1d(np.arange(m_u), append_U), :] = U
        if U_new.dtype != self.dtype_:
            U_new = U_new.astype(self.dtype_)
return U_new
def _decompose_coo(self, X):
row = X.row
col = X.col
val = X.data
if row.dtype != ctypes.c_int:
row = row.astype(ctypes.c_int)
if col.dtype != ctypes.c_int:
col = col.astype(ctypes.c_int)
if val.dtype != self.dtype_:
val = val.astype(self.dtype_)
return row, col, val
def _process_U_arr(self, U):
Urow = np.empty(0, dtype=ctypes.c_int)
Ucol = np.empty(0, dtype=ctypes.c_int)
Uval = np.empty(0, dtype=self.dtype_)
Uarr = np.empty((0,0), dtype=self.dtype_)
Ucols = np.empty(0, dtype=object)
m = 0
p = 0
if issparse(U) and not isspmatrix_coo(U):
U = U.tocoo()
if isspmatrix_coo(U):
Urow, Ucol, Uval = self._decompose_coo(U)
m, p = U.shape
elif U is not None:
if isinstance(U, pd.DataFrame):
Ucols = U.columns.to_numpy()
U = U.to_numpy()
if not U.flags["C_CONTIGUOUS"]:
U = np.ascontiguousarray(U)
if U.dtype != self.dtype_:
U = U.astype(self.dtype_)
Uarr = U
m, p = Uarr.shape
return Urow, Ucol, Uval, Uarr, Ucols, m, p
def _convert_ids(self, X, U, U_bin, col="UserId"):
### Note: if one 'UserId' column is a Pandas Categorical, then all
### of them in the other DataFrames have to be too.
swapped = False
append_U = np.empty(0, dtype=object)
append_Ub = np.empty(0, dtype=object)
msg = "'X' and side info have no IDs in common."
if (U is not None) and (U_bin is not None):
user_ids1 = np.intersect1d(U[col].to_numpy(), X[col].to_numpy())
user_ids2 = np.intersect1d(U_bin[col].to_numpy(), X[col].to_numpy())
user_ids3 = np.intersect1d(U_bin[col].to_numpy(), U[col].to_numpy())
if (user_ids1.shape[0] == 0) and (user_ids2.shape[0] == 0):
raise ValueError(msg)
user_ids = np.intersect1d(user_ids1, user_ids2)
u_not_x = np.setdiff1d(U[col].to_numpy(), X[col].to_numpy())
x_not_u = np.setdiff1d(X[col].to_numpy(), U[col].to_numpy())
b_not_x = np.setdiff1d(U_bin[col].to_numpy(), X[col].to_numpy())
x_not_b = np.setdiff1d(X[col].to_numpy(), U_bin[col].to_numpy())
b_not_u = np.setdiff1d(U_bin[col].to_numpy(), U[col].to_numpy())
u_not_b = np.setdiff1d(U[col].to_numpy(), U_bin[col].to_numpy())
### There can be cases in which the sets are disjoint,
### and will need to add NAs to one of the inputs.
if (u_not_x.shape[0] == 0 and
x_not_u.shape[0] == 0 and
b_not_x.shape[0] == 0 and
x_not_b.shape[0] == 0 and
b_not_u.shape[0] == 0 and
u_not_b.shape[0] == 0):
user_ids = user_ids
else:
if u_not_b.shape[0] >= b_not_u.shape[0]:
user_ids = np.r_[user_ids, user_ids1, X[col].to_numpy(), user_ids3, U[col].to_numpy(), U_bin[col].to_numpy()]
append_U = x_not_u
append_Ub = np.r_[x_not_b, u_not_b]
else:
user_ids = np.r_[user_ids, user_ids2, X[col].to_numpy(), user_ids3, U_bin[col].to_numpy(), U[col].to_numpy()]
append_U = np.r_[x_not_u, b_not_u]
append_Ub = x_not_b
_, user_mapping_ = pd.factorize(user_ids)
X = X.assign(**{col : pd.Categorical(X[col], user_mapping_).codes})
if X[col].dtype != ctypes.c_int:
X = X.assign(**{col : X[col].astype(ctypes.c_int)})
U = U.assign(**{col : pd.Categorical(U[col], user_mapping_).codes})
if U[col].dtype != ctypes.c_int:
                U = U.assign(**{col : U[col].astype(ctypes.c_int)})
U_bin = U_bin.assign(**{col : pd.Categorical(U_bin[col], user_mapping_).codes})
if U_bin[col].dtype != ctypes.c_int:
U_bin = U_bin.assign(**{col : U_bin[col].astype(ctypes.c_int)})
if append_U.shape[0]:
append_U = pd.Categorical(np.unique(append_U), user_mapping_).codes.astype(ctypes.c_int)
append_U = np.sort(append_U)
if append_Ub.shape[0]:
append_Ub = pd.Categorical(np.unique(append_Ub), user_mapping_).codes.astype(ctypes.c_int)
append_Ub = np.sort(append_Ub)
else:
if (U is None) and (U_bin is not None):
U, U_bin = U_bin, U
swapped = True
if (U is not None):
user_ids = np.intersect1d(U[col].to_numpy(), X[col].to_numpy())
if user_ids.shape[0] == 0:
raise ValueError(msg)
u_not_x = np.setdiff1d(U[col].to_numpy(), X[col].to_numpy())
x_not_u = np.setdiff1d(X[col].to_numpy(), U[col].to_numpy())
if (u_not_x.shape[0]) or (x_not_u.shape[0]):
### Case0: both have the same entries
### This is the ideal situation
if (x_not_u.shape[0] == 0) and (u_not_x.shape[0] == 0):
user_ids = user_ids
### Case1: X has IDs that U doesn't, but not the other way around
### Here there's no need to do anything special afterwards
if (x_not_u.shape[0] > 0) and (u_not_x.shape[0] == 0):
user_ids = np.r_[user_ids, x_not_u]
### Case2: U has IDs that X doesn't, but not the other way around
### Don't need to do anything special afterwards either
elif (u_not_x.shape[0] > 0) and (x_not_u.shape[0] == 0):
user_ids = np.r_[user_ids, u_not_x]
### Case3: both have IDs that the others don't
else:
user_ids = np.r_[user_ids, X[col].to_numpy(), U[col].to_numpy()]
append_U = x_not_u
_, user_mapping_ = pd.factorize(user_ids)
if not isinstance(user_mapping_, np.ndarray):
user_mapping_ = user_mapping_.to_numpy()
X = X.assign(**{col : pd.Categorical(X[col], user_mapping_).codes})
if X[col].dtype != ctypes.c_int:
X = X.assign(**{col : X[col].astype(ctypes.c_int)})
U = U.assign(**{col : pd.Categorical(U[col], user_mapping_).codes})
if U[col].dtype != ctypes.c_int:
U = U.assign(**{col : U[col].astype(ctypes.c_int)})
if append_U.shape[0]:
append_U = pd.Categorical(append_U, user_mapping_).codes.astype(ctypes.c_int)
append_U = np.sort(append_U)
else:
X_col, user_mapping_ = pd.factorize(X[col].to_numpy())
X = X.assign(**{col : X_col})
if X[col].dtype != ctypes.c_int:
X = X.assign(**{col : X[col].astype(ctypes.c_int)})
if not isinstance(user_mapping_, np.ndarray):
user_mapping_ = user_mapping_.to_numpy()
if swapped:
U, U_bin = U_bin, U
append_U, append_Ub = append_Ub, append_U
return X, U, U_bin, user_mapping_, append_U, append_Ub
def _process_U_df(self, U, is_I=False, df_name="U"):
Urow = np.empty(0, dtype=ctypes.c_int)
Ucol = np.empty(0, dtype=ctypes.c_int)
Uval = np.empty(0, dtype=self.dtype_)
Uarr = np.empty((0,0), dtype=self.dtype_)
Ucols = np.empty(0, dtype=object)
cl_take = "ItemId" if is_I else "UserId"
m = 0
p = 0
if U is not None:
if "ColumnId" in U.columns.values:
Urow = U[cl_take].astype(ctypes.c_int).to_numpy()
Ucol = U.ColumnId.astype(ctypes.c_int).to_numpy()
if "Value" not in U.columns.values:
msg = "If passing sparse '%s', must have column 'Value'."
msg = msg % df_name
raise ValueError(msg)
Uval = U.Value.astype(self.dtype_).to_numpy()
m = int(Urow.max() + 1)
p = int(Ucol.max() + 1)
else:
U = U.sort_values(cl_take)
Uarr = U[[cl for cl in U.columns.values if cl != cl_take]]
Ucols = Uarr.columns.to_numpy()
Uarr = Uarr.to_numpy()
if not Uarr.flags["C_CONTIGUOUS"]:
Uarr = np.ascontiguousarray(Uarr)
if Uarr.dtype != self.dtype_:
Uarr = Uarr.astype(self.dtype_)
m, p = Uarr.shape
return Urow, Ucol, Uval, Uarr, Ucols, m, p
def _process_new_U(self, U, U_col, U_val, U_bin, is_I=False):
letter = "U" if not is_I else "I"
name = "user" if not is_I else "item"
Mat = self.C_ if not is_I else self.D_
MatBin = self.Cbin_ if not is_I else self.Dbin_
Cols = self._U_cols if not is_I else self._I_cols
ColsBin = self._Ub_cols if not is_I else self._Ib_cols
dct = self.user_dict_ if not is_I else self.item_dict_
mapping = self.user_mapping_ if not is_I else self.item_mapping_
if ((U_col is not None) and (U_val is None)) or ((U_col is None) and (U_val is not None)):
raise ValueError("Must pass '%s_col' and '%s_val' together."
% (letter, letter))
if (U_col is not None) and (U is not None):
raise ValueError("Can only pass %s info in one format."
% name)
if (U is None) and (U_col is None) and (U_bin is None):
raise ValueError("Must pass %s side information in some format."
% name)
###
if U is not None:
if Mat.shape[0] == 0:
raise ValueError("Model was not fit to %s data." % name)
if isinstance(U, pd.DataFrame) and Cols.shape[0]:
U = U[Cols]
U = np.array(U).reshape(-1).astype(self.dtype_)
if U.shape[0] != Mat.shape[0]:
raise ValueError("Dimensions of %s don't match with earlier data."
% letter)
else:
U = np.empty(0, dtype=self.dtype_)
###
if U_bin is not None:
if MatBin.shape[0] == 0:
raise ValueError("Model was not fit to %s binary data." % name)
if isinstance(U_bin, pd.DataFrame) and (ColsBin.shape[0]):
U_bin = U_bin[ColsBin]
U_bin = np.array(U_bin).reshape(-1).astype(self.dtype_)
if U_bin.shape[0] != MatBin.shape[0]:
raise ValueError("Dimensions of %s_bin don't match with earlier data."
% letter)
else:
U_bin = np.empty(0, dtype=self.dtype_)
###
if U_col is not None:
if Mat.shape[0] == 0:
raise ValueError("Model was not fit to %s data." % name)
U_val = np.array(U_val).reshape(-1).astype(self.dtype_)
if U_val.shape[0] == 0:
if np.array(U_col).shape[0] > 0:
raise ValueError("'%s_col' and '%s_val' must have the same number of entries." % (letter, letter))
U_col = np.empty(0, dtype=ctypes.c_int)
U_val = np.empty(0, dtype=self.dtype_)
else:
if self.reindex_:
if len(dct):
try:
U_col = np.array([dct[u] for u in U_col])
except:
raise ValueError("Sparse inputs cannot contain missing values.")
else:
U_col = pd.Categorical(U_col, mapping).codes.astype(ctypes.c_int)
if np.any(U_col < 0):
raise ValueError("Sparse inputs cannot contain missing values.")
U_col = U_col.astype(ctypes.c_int)
else:
U_col = np.array(U_col).reshape(-1).astype(ctypes.c_int)
imin, imax = U_col.min(), U_col.max()
if np.isnan(imin) or np.isnan(imax):
raise ValueError("Sparse inputs cannot contain missing values.")
if (imin < 0) or (imax >= Mat.shape[0]):
msg = "Column indices for user info must be within the range"
msg += " of the data that was pased to 'fit'."
raise ValueError(msg)
if U_val.shape[0] != U_col.shape[0]:
raise ValueError("'%s_col' and '%s_val' must have the same number of entries." % (letter, letter))
else:
U_col = np.empty(0, dtype=ctypes.c_int)
U_val = np.empty(0, dtype=self.dtype_)
###
return U, U_col, U_val, U_bin
def _process_new_U_2d(self, U, is_I=False, allow_csr=False):
letter = "U" if not is_I else "I"
col_id = "UserId" if not is_I else "ItemId"
Cols = self._U_cols if not is_I else self._I_cols
Mat = self.C_ if not is_I else self.D_
Uarr = np.empty((0,0), dtype=self.dtype_)
Urow = np.empty(0, dtype=ctypes.c_int)
Ucol = np.empty(0, dtype=ctypes.c_int)
Uval = np.empty(0, dtype=self.dtype_)
Ucsr_p = np.empty(0, dtype=ctypes.c_size_t)
Ucsr_i = np.empty(0, dtype=ctypes.c_int)
Ucsr = np.empty(0, dtype=self.dtype_)
m, p = U.shape if U is not None else (0,0)
if (p != Mat.shape[0]) and (Mat.shape[0] > 0) and (p > 0):
msg = "'%s' must have the same columns "
msg += "as the data passed to 'fit'."
raise ValueError(msg % letter)
if issparse(U) and (not isspmatrix_coo(U)) and (not isspmatrix_csr(U)):
U = U.tocoo()
elif isspmatrix_csr(U) and not allow_csr:
U = U.tocoo()
if isinstance(U, pd.DataFrame):
if col_id in U.columns.values:
warnings.warn("'%s' not meaningful for new inputs." % col_id)
if Cols.shape[0]:
U = U[Cols]
Uarr = U.to_numpy()
Uarr = np.ascontiguousarray(Uarr)
if Uarr.dtype != self.dtype_:
Uarr = Uarr.astype(self.dtype_)
elif isspmatrix_coo(U):
Urow = U.row.astype(ctypes.c_int)
Ucol = U.col.astype(ctypes.c_int)
Uval = U.data.astype(self.dtype_)
elif isspmatrix_csr(U):
if not allow_csr:
raise ValueError("Unexpected error.")
Ucsr_p = U.indptr.astype(ctypes.c_size_t)
Ucsr_i = U.indices.astype(ctypes.c_int)
Ucsr = U.data.astype(self.dtype_)
elif isinstance(U, np.ndarray):
if not U.flags["C_CONTIGUOUS"]:
U = np.ascontiguousarray(U)
if U.dtype != self.dtype_:
U = U.astype(self.dtype_)
Uarr = U
elif U is None:
pass
else:
if not allow_csr:
msg = "'%s' must be a Pandas DataFrame, SciPy sparse COO, or NumPy array."
else:
msg = "'%s' must be a Pandas DataFrame, SciPy sparse CSR or COO, or NumPy array."
raise ValueError(msg % letter)
return Uarr, Urow, Ucol, Uval, Ucsr_p, Ucsr_i, Ucsr, m, p
def _process_new_Ub_2d(self, U_bin, is_I=False):
letter = "U" if not is_I else "I"
col_id = "UserId" if not is_I else "ItemId"
Cols = self._Ub_cols if not is_I else self._Ib_cols
Mat = self.Cbin_ if not is_I else self.Dbin_
Ub_arr = np.empty((0,0), dtype=self.dtype_)
m_ub, pbin = U_bin.shape if U_bin is not None else (0,0)
if max(m_ub, pbin) and (not Mat.shape[0] or not Mat.shape[1]):
raise ValueError("Cannot pass binary data if model was not fit to binary side info.")
if (pbin != Mat.shape[0]) and (Mat.shape[0] > 0) and (pbin > 0):
msg = "'%s_bin' must have the same columns "
msg += "as the data passed to 'fit'."
raise ValueError(msg % letter)
if isinstance(U_bin, pd.DataFrame):
if col_id in U_bin.columns.values:
warnings.warn("'%s' not meaningful for new inputs." % col_id)
if Cols.shape[0]:
U_bin = U_bin[Cols]
Ub_arr = U_bin.to_numpy()
Ub_arr = np.ascontiguousarray(Ub_arr)
if Ub_arr.dtype != self.dtype_:
Ub_arr = Ub_arr.astype(self.dtype_)
        elif isinstance(U_bin, np.ndarray):
            if not U_bin.flags["C_CONTIGUOUS"]:
                U_bin = np.ascontiguousarray(U_bin)
            if U_bin.dtype != self.dtype_:
                U_bin = U_bin.astype(self.dtype_)
            Ub_arr = U_bin
        elif U_bin is None:
pass
else:
raise ValueError("'%s_bin' must be a Pandas DataFrame or NumPy array."
% letter)
return Ub_arr, m_ub, pbin
def _process_new_X_2d(self, X, W=None):
if len(X.shape) != 2:
raise ValueError("'X' must be 2-dimensional.")
Xarr = np.empty((0,0), dtype=self.dtype_)
Xrow = np.empty(0, dtype=ctypes.c_int)
Xcol = np.empty(0, dtype=ctypes.c_int)
Xval = np.empty(0, dtype=self.dtype_)
Xcsr_p = np.empty(0, dtype=ctypes.c_size_t)
Xcsr_i = np.empty(0, dtype=ctypes.c_int)
Xcsr = np.empty(0, dtype=self.dtype_)
W_dense = np.empty((0,0), dtype=self.dtype_)
W_sp = np.empty(0, dtype=self.dtype_)
m, n = X.shape
if issparse(X) and (not isspmatrix_coo(X)) and (not isspmatrix_csr(X)):
if (W is not None) and (not issparse(W)):
if not isinstance(W, np.ndarray):
W = np.array(W).reshape(-1)
if W.shape[0] != X.nnz:
raise ValueError("'X' and 'W' have different number of entries.")
if isspmatrix_csc(X):
W = csc_matrix((W, X.indices, X.indptr), shape=(X.shape[0], X.shape[1]))
W = W.tocoo()
else:
raise ValueError("Must pass 'X' as SciPy sparse COO if there are weights.")
X = X.tocoo()
if issparse(W) and (not isspmatrix_coo(W)) and (not isspmatrix_csr(W)):
W = W.tocoo()
if (isspmatrix_coo(X) != isspmatrix_coo(W)):
if not isspmatrix_coo(X):
X = X.tocoo()
if not isspmatrix_coo(W):
W = W.tocoo()
if issparse(W):
W = W.data
if isspmatrix_coo(X):
Xrow = X.row.astype(ctypes.c_int)
Xcol = X.col.astype(ctypes.c_int)
Xval = X.data.astype(self.dtype_)
if W is not None:
W_sp = np.array(W).reshape(-1).astype(self.dtype_)
if W_sp.shape[0] != Xval.shape[0]:
msg = "'W' must have the same number of non-zero entries "
msg += "as 'X'."
raise ValueError(msg)
elif isspmatrix_csr(X):
Xcsr_p = X.indptr.astype(ctypes.c_size_t)
Xcsr_i = X.indices.astype(ctypes.c_int)
Xcsr = X.data.astype(self.dtype_)
if W is not None:
W_sp = np.array(W).reshape(-1).astype(self.dtype_)
if W_sp.shape[0] != Xcsr.shape[0]:
msg = "'W' must have the same number of non-zero entries "
msg += "as 'X'."
raise ValueError(msg)
elif isinstance(X, np.ndarray):
if not X.flags["C_CONTIGUOUS"]:
X = np.ascontiguousarray(X)
if X.dtype != self.dtype_:
X = X.astype(self.dtype_)
Xarr = X
if W is not None:
assert W.shape[0] == X.shape[0]
assert W.shape[1] == X.shape[1]
if not W.flags["C_CONTIGUOUS"]:
W = np.ascontiguousarray(W)
if W.dtype != self.dtype_:
W = W.astype(self.dtype_)
W_dense = W
else:
raise ValueError("'X' must be a SciPy CSR or COO matrix, or NumPy array.")
if n > self._n_orig:
raise ValueError("'X' has more columns than what was passed to 'fit'.")
if self.apply_log_transf:
if Xval.min() < 1:
raise ValueError("Cannot pass values below 1 with 'apply_log_transf=True'.")
return Xarr, Xrow, Xcol, Xval, Xcsr_p, Xcsr_i, Xcsr, m, n, W_dense, W_sp
def _process_users_items(self, user, item, include, exclude, allows_no_item=True):
if (include is not None and np.any(pd.isnull(include))) \
or (exclude is not None and np.any(pd.isnull(exclude))):
raise ValueError("'include' and 'exclude' should not contain missing values.")
if include is not None and exclude is not None:
raise ValueError("Cannot pass 'include' and 'exclude' together.")
include = np.array(include).reshape(-1) if include is not None \
else np.empty(0, dtype=ctypes.c_int)
exclude = np.array(exclude).reshape(-1) if exclude is not None \
else np.empty(0, dtype=ctypes.c_int)
if isinstance(user, list) or isinstance(user, tuple):
user = np.array(user)
if isinstance(item, list) or isinstance(item, tuple):
item = np.array(item)
if isinstance(user, pd.Series):
user = user.to_numpy()
if isinstance(item, pd.Series):
item = item.to_numpy()
if user is not None:
if isinstance(user, np.ndarray):
if len(user.shape) > 1:
user = user.reshape(-1)
assert user.shape[0] > 0
if self.reindex_:
if user.shape[0] > 1:
user = pd.Categorical(user, self.user_mapping_).codes
if user.dtype != ctypes.c_int:
user = user.astype(ctypes.c_int)
else:
if len(self.user_dict_):
try:
                                user = self.user_dict_[user[0]]
except:
user = -1
else:
user = pd.Categorical(user, self.user_mapping_).codes[0]
else:
if self.reindex_:
if len(self.user_dict_):
try:
user = self.user_dict_[user]
except:
user = -1
else:
user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0]
user = np.array([user])
if item is not None:
if isinstance(item, np.ndarray):
if len(item.shape) > 1:
item = item.reshape(-1)
assert item.shape[0] > 0
if self.reindex_:
if item.shape[0] > 1:
item = pd.Categorical(item, self.item_mapping_).codes
if item.dtype != ctypes.c_int:
item = item.astype(ctypes.c_int)
else:
if len(self.item_dict_):
try:
item = self.item_dict_[item[0]]
except:
item = -1
else:
item = pd.Categorical(item, self.item_mapping_).codes[0]
else:
if self.reindex_:
if len(self.item_dict_):
try:
item = self.item_dict_[item]
except:
item = -1
else:
item = pd.Categorical(np.array([item]), self.item_mapping_).codes[0]
item = np.array([item])
else:
if not allows_no_item:
raise ValueError("Must pass IDs for 'item'.")
if self.reindex_:
msg = "'%s' should contain only items that were passed to 'fit'."
if include.shape[0]:
if len(self.item_dict_):
try:
include = np.array([self.item_dict_[i] for i in include])
except:
raise ValueError(msg % "include")
else:
include = pd.Categorical(include, self.item_mapping_).codes
if np.any(include < 0):
raise ValueError(msg % "include")
if include.dtype != ctypes.c_int:
include = include.astype(ctypes.c_int)
include = include.reshape(-1)
if exclude.shape[0]:
if len(self.item_dict_):
try:
exclude = np.array([self.item_dict_[i] for i in exclude])
except:
raise ValueError(msg % "exclude")
else:
exclude = pd.Categorical(exclude, self.item_mapping_).codes
if exclude.dtype != ctypes.c_int:
exclude = exclude.astype(ctypes.c_int)
if np.any(exclude < 0):
raise ValueError(msg % "exclude")
if exclude.dtype != ctypes.c_int:
exclude = exclude.astype(ctypes.c_int)
exclude = exclude.reshape(-1)
else:
msg = "'%s' entries must be within the range of the %s (%s)"
msg += " of the data that was passed to 'fit'."
if include.shape[0]:
imin, imax = include.min(), include.max()
if (imin < 0) or (imax >= self._B_pred.shape[0]):
raise ValueError(msg % ("include", "items", "columns"))
if exclude.shape[0]:
emin, emax = exclude.min(), exclude.max()
if (emin < 0) or (emax >= self._B_pred.shape[0]):
raise ValueError(msg % ("exclude", "items", "columns"))
if user is not None:
user = user.astype(ctypes.c_int)
if item is not None:
item = item.astype(ctypes.c_int)
if include.dtype != ctypes.c_int:
include = include.astype(ctypes.c_int)
if exclude.dtype != ctypes.c_int:
exclude = exclude.astype(ctypes.c_int)
return user, item, include, exclude
def _fit_common(self, X, U=None, I=None, U_bin=None, I_bin=None, W=None,
enforce_same_shape=False):
if (U_bin is not None or I_bin is not None) and self.method != "lbfgs":
msg = "Binary side info is only supported when using method='lbfgs'."
raise ValueError(msg)
self._reset()
if issparse(X) and (not isspmatrix_coo(X)):
if (W is not None) and (not issparse(W)):
if isspmatrix_csr(X):
if not isinstance(W, np.ndarray):
W = np.array(W).reshape(-1)
if W.shape[0] != X.nnz:
raise ValueError("'X' and 'W' have different number of entries.")
W = csr_matrix((W, X.indices, X.indptr), shape=(X.shape[0], X.shape[1]))
W = W.tocoo()
elif isspmatrix_csc(X):
if not isinstance(W, np.ndarray):
W = np.array(W).reshape(-1)
if W.shape[0] != X.nnz:
raise ValueError("'X' and 'W' have different number of entries.")
W = csc_matrix((W, X.indices, X.indptr), shape=(X.shape[0], X.shape[1]))
W = W.tocoo()
else:
raise ValueError("Must pass 'X' as SciPy COO if passing weights.")
X = X.tocoo()
if issparse(W) and (not isspmatrix_coo(W)):
W = W.tocoo()
if issparse(W):
W = W.data
if isinstance(X, pd.DataFrame):
msg = "If passing 'X' as DataFrame, '%s' must also be a DataFrame."
if U is not None and (not isinstance(U, pd.DataFrame)):
raise ValueError(msg % "U")
if I is not None and (not isinstance(I, pd.DataFrame)):
raise ValueError(msg % "I")
if U_bin is not None and (not isinstance(U_bin, pd.DataFrame)):
raise ValueError(msg % "U_bin")
if I_bin is not None and (not isinstance(I_bin, pd.DataFrame)):
raise ValueError(msg % "I_bin")
if W is not None:
msg = "Passing 'W' with 'X' as DataFrame is not supported."
msg += " Weight should be under a column in the DataFrame, "
msg += "called 'Weight'."
raise ValueError(msg)
assert "UserId" in X.columns.values
assert "ItemId" in X.columns.values
if (self._implicit) and ("Rating" in X.columns.values) and ("Value" not in X.columns.values):
X = X.rename(columns={"Rating":"Value"}, copy=False, inplace=False)
if self._implicit:
assert "Value" in X.columns.values
else:
assert "Rating" in X.columns.values
if U is not None:
assert "UserId" in U.columns.values
if I is not None:
assert "ItemId" in I.columns.values
if U_bin is not None:
assert "UserId" in U_bin.columns.values
if I_bin is not None:
assert "ItemId" in I_bin.columns.values
X, U, U_bin, self.user_mapping_, append_U, append_Ub = self._convert_ids(X, U, U_bin, "UserId")
X, I, I_bin, self.item_mapping_, append_I, append_Ib = self._convert_ids(X, I, I_bin, "ItemId")
Xrow = X.UserId.to_numpy()
Xcol = X.ItemId.to_numpy()
if Xrow.dtype != ctypes.c_int:
Xrow = Xrow.astype(ctypes.c_int)
if Xcol.dtype != ctypes.c_int:
Xcol = Xcol.astype(ctypes.c_int)
if self._implicit:
Xval = X.Value.to_numpy()
else:
Xval = X.Rating.to_numpy()
if Xval.dtype != self.dtype_:
Xval = Xval.astype(self.dtype_)
if Xval.shape[0] == 0:
raise ValueError("'X' contains no non-zero entries.")
Xarr = np.empty((0,0), dtype=self.dtype_)
W_sp = np.empty(0, dtype=self.dtype_)
if "Weight" in X.columns.values:
W_sp = X.Weight.astype(self.dtype_).to_numpy()
W_dense = np.empty((0,0), dtype=self.dtype_)
Urow, Ucol, Uval, Uarr, self._U_cols, m_u, p = self._process_U_df(U, False, "U")
Irow, Icol, Ival, Iarr, self._I_cols, n_i, q = self._process_U_df(I, True, "I")
Ub_arr = np.empty((0,0), dtype=self.dtype_)
Ib_arr = np.empty((0,0), dtype=self.dtype_)
m_ub = 0
pbin = 0
n_ib = 0
qbin = 0
msg = "Binary side info data cannot be passed in sparse format."
if U_bin is not None:
if "ColumnId" in U_bin.columns.values:
raise ValueError(msg)
_1, _2, _3, Ub_arr, self._Ub_cols, m_ub, pbin = self._process_U_df(U_bin, False, "U_bin")
if I_bin is not None:
if "ColumnId" in I_bin.columns.values:
raise ValueError(msg)
_1, _2, _3, Ib_arr, self._Ib_cols, n_ib, qbin = self._process_U_df(I_bin, True, "U_bin")
m_u += append_U.shape[0]
n_i += append_I.shape[0]
if append_U.shape[0] and Uarr is not None:
if enforce_same_shape:
raise ValueError("'X' and 'U' must have the same rows.")
Uarr = self._append_NAs(Uarr, m_u, p, append_U)
if append_I.shape[0] and Iarr is not None:
if enforce_same_shape:
raise ValueError("Columns of 'X' must match with rows of 'I'.")
Iarr = self._append_NAs(Iarr, n_i, q, append_I)
if append_Ub.shape[0]:
m_ub += append_Ub.shape[0]
Ub_arr = self._append_NAs(Ub_arr, m_ub, pbin, append_Ub)
if append_Ib.shape[0]:
n_ib += append_Ib.shape[0]
Ib_arr = self._append_NAs(Ib_arr, n_ib, qbin, append_Ib)
self.reindex_ = True
if self.produce_dicts:
self.user_dict_ = {self.user_mapping_[i]:i for i in range(self.user_mapping_.shape[0])}
self.item_dict_ = {self.item_mapping_[i]:i for i in range(self.item_mapping_.shape[0])}
elif isspmatrix_coo(X) or isinstance(X, np.ndarray):
if issparse(U) and not isspmatrix_coo(U):
U = U.tocoo()
if issparse(I) and not isspmatrix_coo(I):
I = I.tocoo()
msg = " must be a Pandas DataFrame, NumPy array, or SciPy sparse COO matrix."
msg_bin = " must be a Pandas DataFrame or NumPy array."
if U is not None and not (isinstance(U, pd.DataFrame) or isinstance(U, np.ndarray) or isspmatrix_coo(U)):
raise ValueError("'U'" + msg)
if I is not None and not (isinstance(I, pd.DataFrame) or isinstance(I, np.ndarray) or isspmatrix_coo(I)):
raise ValueError("'I'" + msg)
if U_bin is not None and not (isinstance(U_bin, pd.DataFrame) or isinstance(U_bin, np.ndarray)):
raise ValueError("'U_bin'" + msg_bin)
if I_bin is not None and not (isinstance(I_bin, pd.DataFrame) or isinstance(I_bin, np.ndarray)):
raise ValueError("'I_bin'" + msg_bin)
if W is not None:
if isinstance(W, list) or isinstance(W, pd.Series):
W = np.array(W)
if (len(W.shape) > 1) and isspmatrix_coo(X):
W = W.reshape(-1)
if (not isinstance(W, np.ndarray)) or \
(isspmatrix_coo(X) and W.shape[0] != X.nnz) or\
(isinstance(X, np.ndarray) and (W.shape[0] != X.shape[0] or W.shape[1] != X.shape[1])):
raise ValueError("'W' must be an array with the same number of entries as 'X'.")
if (self._implicit) and (isinstance(X, np.ndarray)) and (self.k_sec == 0):
raise ValueError("Dense arrays for 'X' not supported with implicit-feedback.")
Xrow, Xcol, Xval, Xarr, _1, _2, _3 = self._process_U_arr(X)
Urow, Ucol, Uval, Uarr, self._U_cols, m_u, p = self._process_U_arr(U)
Irow, Icol, Ival, Iarr, self._I_cols, n_i, q = self._process_U_arr(I)
_1, _2, _3, Ub_arr, self._Ub_cols, m_ub, pbin = self._process_U_arr(U_bin)
_1, _2, _3, Ib_arr, self._Ib_cols, n_ib, qbin = self._process_U_arr(I_bin)
if issparse(X) and (Xval.shape[0] == 0):
raise ValueError("'X' contains no non-zero entries.")
W_sp = np.empty(0, dtype=self.dtype_)
W_dense = np.empty((0,0), dtype=self.dtype_)
if W is not None:
if issparse(W) and not isspmatrix_coo(W):
W = W.tocoo()
if issparse(W):
W = W.data
if isspmatrix_coo(X):
W_sp = W.astype(self.dtype_)
else:
W_dense = W.astype(self.dtype_)
self.reindex_ = False
else:
msg = "'X' must be a Pandas DataFrame, SciPy COO matrix, or NumPy array."
raise ValueError(msg)
if Xarr.shape[0]:
m, n = Xarr.shape
else:
m = int(Xrow.max() + 1)
n = int(Xcol.max() + 1)
if isspmatrix_coo(X):
m = max(m, X.shape[0])
n = max(n, X.shape[1])
if enforce_same_shape:
m = max(m, m_u, m_ub)
n = max(n, n_i, n_ib)
if enforce_same_shape:
msg_err_rows = "'X' and 'U%s' must have the same rows."
msg_err_cols = "Columns of 'X' must match with rows of 'I%s'."
if Uarr.shape[0]:
if Uarr.shape[0] != m:
raise ValueError(msg_err_rows % "")
if Iarr.shape[0]:
if Iarr.shape[0] != n:
raise ValueError(msg_err_cols % "")
if Uval.shape[0]:
if m_u != m:
raise ValueError(msg_err_rows % "")
if Ival.shape[0]:
if n_i != n:
raise ValueError(msg_err_cols % "")
if Ub_arr.shape[0]:
if m_ub != m:
raise ValueError(msg_err_rows % "_bin")
if Ib_arr.shape[0]:
if n_ib != n:
raise ValueError(msg_err_rows % "_bin")
if max(m, n, m_u, n_i, p, q, m_ub, n_ib, pbin, qbin) > np.iinfo(ctypes.c_int).max:
msg = "Error: dimensionality of the inputs is too high. "
msg += "Number of rows/columns cannot be more than INT_MAX."
raise ValueError(msg)
if (max(m_u, m_ub, p, pbin) == 0) and (self.k_user):
self.k_user = 0
warnings.warn("No user side info provided, will set 'k_user' to zero.")
if (max(n_i, n_ib, q, qbin) == 0) and (self.k_item):
self.k_item = 0
warnings.warn("No item side info provided, will set 'k_item' to zero.")
if (m == 0) or (n == 0):
raise ValueError("'X' must have at least one row and column.")
if self.apply_log_transf:
msg_small = "Cannot pass values below 1 with 'apply_log_transf=True'."
if Xarr.shape[0]:
if np.nanmin(Xarr) < 1:
raise ValueError(msg_small)
elif Xval.shape[0]:
if Xval.min() < 1:
raise ValueError(msg_small)
if (self.NA_as_zero) and (Xarr.shape[0]):
warnings.warn("Warning: using 'NA_as_zero', but passed dense 'X'.")
if (self.NA_as_zero_user) and (Uarr.shape[0]):
warnings.warn("Warning: using 'NA_as_zero_user', but passed dense 'U'.")
if (self.NA_as_zero_item) and (Iarr.shape[0]):
warnings.warn("Warning: using 'NA_as_zero_item', but passed dense 'I'.")
return self._fit(Xrow, Xcol, Xval, W_sp, Xarr, W_dense,
Uarr, Urow, Ucol, Uval, Ub_arr,
Iarr, Irow, Icol, Ival, Ib_arr,
m, n, m_u, n_i, p, q,
m_ub, n_ib, pbin, qbin)
def predict(self, user, item):
"""
Predict ratings/values given by existing users to existing items
Note
----
For CMF explicit, invalid combinations of users and items will be
set to the global mean plus biases if applicable. For other models,
invalid combinations will be set as NaN.
Parameters
----------
user : array-like(n,)
Users for whom ratings/values are to be predicted. If 'X' passed to
fit was a DataFrame, must match with the entries in its 'UserId'
column, otherwise should match with the rows of 'X'.
item : array-like(n,)
            Items for which ratings/values are to be predicted. If 'X' passed to
fit was a DataFrame, must match with the entries in its 'ItemId'
column, otherwise should match with the columns of 'X'.
Each entry in ``item`` will be matched with the corresponding entry
of ``user`` at the same position in the array/list.
Returns
-------
scores : array(n,)
Predicted ratings for the requested user-item combinations.
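        Examples
        --------
        Minimal usage sketch (illustrative only): 'model' stands for an
        already-fitted instance and the IDs below are hypothetical values
        assumed to exist in the data passed to 'fit'.

            preds = model.predict(user=[10, 10, 25], item=[201, 333, 201])
            # 'preds' is a 1-d array with one predicted score per (user, item) pair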
"""
if user is None and item is None:
raise ValueError("Must pass valid user(s) and item(s).")
return self._predict(user=user, a_vec=None, a_bias=0., item=item)
def _predict(self, user=None, a_vec=None, a_bias=0., item=None):
assert self.is_fitted_
if self._only_prediction_info:
raise ValueError("Cannot use this function after dropping non-essential matrices.")
user, item, _1, _2 = self._process_users_items(user, item, None, None)
c_funs = wrapper_float if self.use_float else wrapper_double
if user is not None:
assert user.shape[0] == item.shape[0]
if user.shape[0] == 1:
if (user[0] == -1) or (item[0] == -1):
if isinstance(self, CMF):
out = self.glob_mean_
if (user[0] >= 0) and (self.user_bias):
out += self.user_bias_[user]
if (item[0] >= 0) and (self.item_bias):
out += self.item_bias_[item]
if (self.center) or (self.user_bias and user[0] >= 0) or (self.item_bias and item[0] >= 0):
return out
return np.nan
else:
out = self._A_pred[user, self.k_user:].dot(self._B_pred[item, self.k_item:].T).reshape(-1)[0]
out += self.glob_mean_
if self.user_bias:
out += self.user_bias_[user]
if self.item_bias:
out += self.item_bias_[item]
if isinstance(out, np.ndarray):
out = out[0]
return out
else:
n_users = max(self._A_pred.shape[0], self.user_bias_.shape[0])
n_items = max(self._B_pred.shape[0], self.item_bias_.shape[0])
if isinstance(self, CMF):
return c_funs.call_predict_X_old_collective_explicit(
self._A_pred,
self._B_pred,
self.user_bias_,
self.item_bias_,
self.glob_mean_,
np.array(user).astype(ctypes.c_int),
np.array(item).astype(ctypes.c_int),
self._k_pred, self.k_user, self.k_item, self._k_main_col,
self.nthreads
)
else:
return c_funs.call_predict_multiple(
self._A_pred,
self._B_pred,
self.user_bias_,
self.item_bias_,
self.glob_mean_,
np.array(user).astype(ctypes.c_int),
np.array(item).astype(ctypes.c_int),
self._k_pred, self.k_user, self.k_item, self._k_main_col,
self.nthreads
)
#### When passing the factors directly
else:
item = np.array([item]).reshape(-1)
nan_entries = (item == -1)
outp = self._B_pred[item, self.k_item:].reshape((item.shape[0],-1)).dot(a_vec[self.k_user:])
outp += a_bias + self.glob_mean_
if self.item_bias:
outp += self.item_bias_[item]
outp[nan_entries] = np.nan
return outp
def _predict_new(self, user, B):
n = B.shape[0]
user, _1, _2, _3 = self._process_users_items(user, None, None, None)
nan_entries = (user < 0) | \
(user >= max(self._A_pred.shape[0], self.user_bias_.shape[0]))
c_funs = wrapper_float if self.use_float else wrapper_double
if user.shape[0] != n:
raise ValueError("'user' must have the same number of entries as item info.")
return c_funs.call_predict_multiple(
self._A_pred,
B,
self.user_bias_,
np.zeros(n, dtype=self.dtype_) if self.item_bias \
else np.empty(0, dtype=self.dtype_),
self.glob_mean_,
np.array(user).astype(ctypes.c_int),
np.arange(n).astype(ctypes.c_int),
self._k_pred, self.k_user, self.k_item, self._k_main_col,
self.nthreads
)
def _predict_user_multiple(self, A, item, bias=None):
m = A.shape[0]
_1, item, _2, _3 = self._process_users_items(None, item, None, None)
nan_entries = (item < 0) | \
(item >= max(self._B_pred.shape[0], self.item_bias_.shape[0]))
c_funs = wrapper_float if self.use_float else wrapper_double
if item.shape[0] != m:
raise ValueError("'item' must have the same number of entries as user info.")
if bias is None:
bias = np.zeros(m, dtype=self.dtype_) if self.user_bias \
else np.empty(0, dtype=self.dtype_)
if isinstance(self, CMF):
return c_funs.call_predict_X_old_collective_explicit(
A,
self._B_pred,
bias,
self.item_bias_,
self.glob_mean_,
np.arange(m).astype(ctypes.c_int),
np.array(item).astype(ctypes.c_int),
self._k_pred, self.k_user, self.k_item, self._k_main_col,
self.nthreads
)
else:
return c_funs.call_predict_multiple(
A,
self._B_pred,
bias,
self.item_bias_,
self.glob_mean_,
np.arange(m).astype(ctypes.c_int),
np.array(item).astype(ctypes.c_int),
self._k_pred, self.k_user, self.k_item, self._k_main_col,
self.nthreads
)
def topN(self, user, n=10, include=None, exclude=None, output_score=False):
"""
Rank top-N highest-predicted items for an existing user
Parameters
----------
user : int or obj
User for which to rank the items. If 'X' passed to 'fit' was a
DataFrame, must match with the entries in its 'UserId' column,
otherwise should match with the rows of 'X'.
n : int
Number of top-N highest-predicted results to output.
include : array-like
List of items which will be ranked. If passing this, will only
make a ranking among these items. If 'X' passed to fit was a
DataFrame, must match with the entries in its 'ItemId' column,
otherwise should match with the columns of 'X'. Can only pass
            one of 'include' or 'exclude'.
exclude : array-like
List of items to exclude from the ranking. If passing this, will
rank all the items except for these. If 'X' passed to fit was a
DataFrame, must match with the entries in its 'ItemId' column,
otherwise should match with the columns of 'X'. Can only pass
            one of 'include' or 'exclude'.
output_score : bool
Whether to output the scores in addition to the IDs. If passing
'False', will return a single array with the item IDs, otherwise
will return a tuple with the item IDs and the scores.
Returns
-------
items : array(n,)
The top-N highest predicted items for this user. If the 'X' data passed to
fit was a DataFrame, will contain the item IDs from its column
'ItemId', otherwise will be integers matching to the columns of 'X'.
scores : array(n,)
The predicted scores for the top-N items. Will only be returned
when passing ``output_score=True``, in which case the result will
be a tuple with these two entries.
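        Examples
        --------
        Minimal usage sketch (illustrative only): 'model' stands for an
        already-fitted instance and the user ID is a hypothetical value assumed
        to exist in the data passed to 'fit'.

            top_items = model.topN(user=10, n=5)
            top_items, scores = model.topN(user=10, n=5, output_score=True)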
"""
if user is None:
raise ValueError("Must pass a valid user.")
return self._topN(user=user, a_vec=None, a_bias=None, n=n,
include=include, exclude=exclude,
output_score=output_score)
def _topN(self, user=None, a_vec=None, a_bias=0, B=None,
n=10, include=None, exclude=None, output_score=False):
assert self.is_fitted_
if self._only_prediction_info:
raise ValueError("Cannot use this function after dropping non-essential matrices.")
user, _, include, exclude = self._process_users_items(user, None, include, exclude)
c_funs = wrapper_float if self.use_float else wrapper_double
if (include.shape[0] > 0) and (include.shape[0] < n):
raise ValueError("'include' has fewer than 'n' entries.")
if (exclude.shape[0] > 0) and ((self._B_pred.shape[0] - exclude.shape[0]) < n):
msg = "'exclude' has a number of entries which leaves behind "
msg += "fewer than 'n' to rank."
raise ValueError(msg)
if user is not None:
user = user[0]
a_vec = self._A_pred[user].reshape(-1)
user_bias_ = 0.
if self.user_bias:
if user is not None:
user_bias_ = self.user_bias_[user]
else:
user_bias_ = a_bias
outp_ix, outp_score = c_funs.call_topN(
a_vec,
(self._B_pred[:self._n_orig] if not self.include_all_X else self._B_pred) if B is None else B,
self.item_bias_ if B is None else \
(np.zeros(n, dtype=self.dtype_) if self.item_bias \
else np.empty(0, dtype=self.dtype_)),
self.glob_mean_, user_bias_,
include,
exclude,
n,
self._k_pred, self.k_user, self.k_item, self._k_main_col,
bool(output_score),
self.nthreads
)
if (self.reindex_) and (B is None):
outp_ix = self.item_mapping_[outp_ix]
if output_score:
return outp_ix, outp_score
else:
return outp_ix
def _factors_cold(self, U=None, U_bin=None, U_col=None, U_val=None):
assert self.is_fitted_
if (self.C_.shape[0] == 0) and (self.Cbin_.shape[0] == 0):
raise ValueError("Method is only available when fitting the model to user side info.")
c_funs = wrapper_float if self.use_float else wrapper_double
U, U_col, U_val, U_bin = self._process_new_U(U, U_col, U_val, U_bin)
if isinstance(self.lambda_, np.ndarray):
lambda_ = self.lambda_[2]
lambda_bias = self.lambda_[0]
else:
lambda_ = self.lambda_
lambda_bias = self.lambda_
if isinstance(self.l1_lambda, np.ndarray):
l1_lambda = self.l1_lambda[2]
l1_lambda_bias = self.l1_lambda[0]
else:
l1_lambda = self.l1_lambda
l1_lambda_bias = self.l1_lambda
if not self._implicit:
_, a_vec = c_funs.call_factors_collective_explicit_single(
np.empty(0, dtype=self.dtype_),
np.empty(0, dtype=self.dtype_),
np.empty(0, dtype=self.dtype_),
np.empty(0, dtype=ctypes.c_int),
np.empty(0, dtype=self.dtype_),
U,
U_val,
U_col,
U_bin,
self._U_colmeans,
self.item_bias_,
self.B_,
self._B_plus_bias,
self.C_,
self.Cbin_,
self.Bi_,
self._BtB,
self._TransBtBinvBt,
self._BtXbias,
self._BeTBeChol,
self._BiTBi,
self._CtC,
self._TransCtCinvCt,
self._CtUbias,
self.glob_mean_,
self._n_orig,
self.k, self.k_user, self.k_item, self.k_main,
lambda_, lambda_bias,
l1_lambda, l1_lambda_bias,
self.scale_lam, self.scale_lam_sideinfo,
self.scale_bias_const, self.scaling_biasA_,
self.w_user, self.w_main, self.w_implicit,
self.user_bias,
self.NA_as_zero_user, self.NA_as_zero,
self.nonneg,
self.add_implicit_features,
self.include_all_X
)
else:
a_vec = c_funs.call_factors_collective_implicit_single(
np.empty(0, dtype=self.dtype_),
np.empty(0, dtype=ctypes.c_int),
U,
U_val,
U_col,
self._U_colmeans,
self.B_,
self.C_,
self._BeTBe,
self._BtB,
self._BeTBeChol,
self._CtUbias,
self.k, self.k_user, self.k_item, self.k_main,
lambda_, l1_lambda, self.alpha,
self._w_main_multiplier,
self.w_user, self.w_main,
self.apply_log_transf,
self.NA_as_zero_user,
self.nonneg
)
return a_vec
def _factors_warm_common(self, X=None, X_col=None, X_val=None, W=None,
U=None, U_bin=None, U_col=None, U_val=None,
return_bias=False, exact=False, output_a=False):
assert self.is_fitted_
if (return_bias) and (not self.user_bias):
raise ValueError("Cannot return bias with model that was fit without it.")
if ((X_col is not None) and (X_val is None)) or ((X_col is None) and (X_val is not None)):
raise ValueError("Must pass 'X_col' and 'X_val' together.")
if (X_col is not None) and (X is not None):
raise ValueError("Can only pass 'X' in one format.")
if (X is None) and (X_col is None):
raise ValueError("Must pass 'X' in some format.")
if (self.C_.shape[0] == 0) and (U is not None or U_col is not None or U_val is not None):
raise ValueError("Cannot pass user information if the model was not fit to it.")
if (self.Cbin_.shape[0] == 0) and (U_bin is not None):
raise ValueError("Cannot pass binary user information if the model was not fit to it.")
if (U is not None) or (U_val is not None) or (U_bin is not None):
U, U_col, U_val, U_bin = self._process_new_U(U, U_col, U_val, U_bin)
else:
U = np.empty(0, dtype=self.dtype_)
U_bin = np.empty(0, dtype=self.dtype_)
U_val = np.empty(0, dtype=self.dtype_)
U_col = np.empty(0, dtype=ctypes.c_int)
if X is not None:
X_col = np.empty(0, dtype=ctypes.c_int)
X_val = np.empty(0, dtype=self.dtype_)
W_sp = np.empty(0, dtype=self.dtype_)
if len(X.shape) > 1:
warnings.warn("Passed a 2-d array for 'X' - method expects a single row.")
X = np.array(X).reshape(-1)
if X.dtype != self.dtype_:
X = X.astype(self.dtype_)
if X.shape[0] != self._n_orig:
raise ValueError("'X' must have the same columns as when passed to 'fit'.")
if W is not None:
W_dense = np.array(W).reshape(-1)
if W_dense.dtype != self.dtype_:
W_dense = W_dense.astype(self.dtype_)
if W_dense.shape[0] != X.shape[0]:
raise ValueError("'W' must have the same number of entries as X.")
else:
W_dense = np.empty(0, dtype=self.dtype_)
else:
X = np.empty(0, dtype=self.dtype_)
W_dense = np.empty(0, dtype=self.dtype_)
X_val = np.array(X_val).reshape(-1)
if X_val.dtype != self.dtype_:
X_val = X_val.astype(self.dtype_)
if X_val.shape[0] == 0:
X_col = np.array(X_col).reshape(-1)
if X_col.dtype != ctypes.c_int:
X_col = X_col.astype(ctypes.c_int)
if X_col.shape[0] > 0:
raise ValueError("'X_col' and 'X_val' must have the same number of entries.")
else:
if self.reindex_:
X_col = np.array(X_col).reshape(-1)
X_col = | pd.Categorical(X_col, self.item_mapping_) | pandas.Categorical |
import sys
import pytz
import hashlib
import numpy as np
import pandas as pd
from datetime import datetime
def edit_form_link(link_text='Submit edits'):
"""Return HTML for link to form for edits"""
return f'<a href="https://docs.google.com/forms/d/e/1FAIpQLScw8EUGIOtUj994IYEM1W7PfBGV0anXjEmz_YKiKJc4fm-tTg/viewform">{link_text}</a>'
def add_google_analytics(input_html):
"""
Return HTML with Google Analytics block added
"""
ga_block = """
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-173043454-1"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-173043454-1');
</script>
"""
output_html = input_html.replace('<!-- replace with google analytics -->', ga_block)
return output_html
def add_geojson(shape_gdf, field_name, field_value, input_html):
"""
Add a GeoJSON feature as a Javascript variable to an HTML string
This variable will be used to calculate the bounds of the map
"""
shape_row = shape_gdf[shape_gdf[field_name] == field_value].copy()
shape_geo = shape_row.geometry.iloc[0]
geo_bounds = shape_geo.boundary[0].xy
output_string = '[['
for idx, value in enumerate(geo_bounds[0]):
if idx > 0:
output_string += ','
output_string += '['
x = geo_bounds[0][idx]
output_string += '{}'.format(x)
y = geo_bounds[1][idx]
output_string += ', {}'.format(y)
output_string += ']\n'
output_string += ']]'
output_html = input_html.replace('REPLACE_WITH_XY', output_string)
return output_html
def dc_coordinates():
"""Return coordinates for a DC-wide map"""
dc_longitude = -77.016243706276569
dc_latitude = 38.894858329321485
dc_zoom_level = 10.3
return dc_longitude, dc_latitude, dc_zoom_level
def anc_names(anc_id):
"""
Return formatted ANC names
"""
ancs = pd.read_csv('data/ancs.csv')
anc_upper = 'ANC' + anc_id
anc_lower = anc_upper.lower()
anc_neighborhoods = ancs[ancs['anc_id'] == anc_id]['neighborhoods'].values[0]
return anc_upper, anc_lower, anc_neighborhoods
def assemble_divo():
"""
Return DataFrame with one row per SMD and various stats about each SMD's ranking
divo = district-votes
"""
results = pd.read_csv('data/results.csv')
districts = pd.read_csv('data/districts.csv')
votes_per_smd = pd.DataFrame(results.groupby('smd_id').votes.sum()).reset_index()
# Calculate number of SMDs in each Ward and ANC
smds_per_ward = pd.DataFrame(districts.groupby('ward').size(), columns=['smds_in_ward']).reset_index()
smds_per_anc = pd.DataFrame(districts.groupby('anc_id').size(), columns=['smds_in_anc']).reset_index()
divo = pd.merge(districts, votes_per_smd, how='inner', on='smd_id')
divo = pd.merge(divo, smds_per_ward, how='inner', on='ward')
divo = pd.merge(divo, smds_per_anc, how='inner', on='anc_id')
divo['smds_in_dc'] = len(districts)
# Rank each SMD by the number of votes recorded for ANC races within that SMD
# method = min: assigns the lowest rank when multiple rows are tied
divo['rank_dc'] = divo['votes'].rank(method='min', ascending=False)
divo['rank_ward'] = divo.groupby('ward').votes.rank(method='min', ascending=False)
divo['rank_anc'] = divo.groupby('anc_id').votes.rank(method='min', ascending=False)
# Create strings showing the ranking of each SMD within its ANC, Ward, and DC-wide
divo['string_dc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_dc'])} out of {row['smds_in_dc']} SMDs", axis=1)
divo['string_ward'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_ward'])} out of {row['smds_in_ward']} SMDs", axis=1)
divo['string_anc'] = divo.apply(
lambda row: f"{make_ordinal(row['rank_anc'])} out of {row['smds_in_anc']} SMDs", axis=1)
average_votes_in_dc = divo.votes.mean()
average_votes_by_ward = divo.groupby('ward').votes.mean()
average_votes_by_anc = divo.groupby('anc_id').votes.mean()
return divo
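# Illustrative usage sketch (not part of the original site build): shows how the ranking
# columns created by assemble_divo() above can be queried. Column names come from the
# function body; the cut-off of 5 rows is an arbitrary choice for the example.
def example_top_smds_by_votes(n_top=5):
    """Return the n_top SMDs with the most ANC votes DC-wide (illustrative helper)."""
    divo = assemble_divo()
    return divo.sort_values('rank_dc').head(n_top)[['smd_id', 'votes', 'string_dc']]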
def list_commissioners(status=None, date_point=None):
"""
Return dataframe with list of commissioners by status
Options:
status=None (all statuses returned) -- default
status='former'
status='current'
status='future'
date_point=None -- all statuses calculated from current DC time (default)
date_point=(some other datetime) -- all statuses calculated from that datetime
"""
commissioners = pd.read_csv('data/commissioners.csv')
if not date_point:
tz = pytz.timezone('America/New_York')
date_point = datetime.now(tz)
commissioners['start_date'] = pd.to_datetime(commissioners['start_date']).dt.tz_localize(tz='America/New_York')
commissioners['end_date'] = pd.to_datetime(commissioners['end_date']).dt.tz_localize(tz='America/New_York')
# Create combined field with start and end dates, showing ambiguity
commissioners['start_date_str'] = commissioners['start_date'].dt.strftime('%B %-d, %Y')
commissioners['end_date_str'] = commissioners['end_date'].dt.strftime('%B %-d, %Y')
# We don't have exact dates when these commissioners started, so show "circa 2019"
commissioners.loc[commissioners['start_date_str'] == 'January 2, 2019', 'start_date_str'] = '~2019'
# Combine start and end dates into one field
commissioners['term_in_office'] = commissioners['start_date_str'] + ' to ' + commissioners['end_date_str']
commissioners['is_former'] = commissioners.end_date < date_point
commissioners['is_current'] = (commissioners.start_date < date_point) & (date_point < commissioners.end_date)
commissioners['is_future'] = date_point < commissioners.start_date
# Test here that there is, at most, one "Current" and one "Future" commissioner per SMD.
# Multiple "Former" commissioners is allowed
smd_count = commissioners.groupby('smd_id')[['is_former', 'is_current', 'is_future']].sum().astype(int)
# smd_count.to_csv('smd_commissioner_count.csv')
if smd_count['is_current'].max() > 1 or smd_count['is_future'].max() > 1:
raise Exception('Too many commissioners per SMD')
if status:
commissioner_output = commissioners[commissioners['is_' + status]].copy()
else:
commissioner_output = commissioners.copy()
return commissioner_output
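# Illustrative usage sketch (not part of the original site build): the status/date_point
# options of list_commissioners() used together. The date below is an arbitrary example;
# pytz and datetime are already imported at the top of this module.
def example_commissioners_on_date():
    tz = pytz.timezone('America/New_York')
    as_of = tz.localize(datetime(2021, 1, 2))
    return list_commissioners(status='current', date_point=as_of)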
def build_results_candidate_people():
"""
Return DataFrame containing results, candidates, and people joined
"""
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
results = pd.read_csv('data/results.csv')
results_candidates = pd.merge(
results #[['candidate_id', 'person_id', 'smd_id']]
, candidates #[['candidate_id']]
, how='left'
, on=['candidate_id', 'smd_id']
)
rcp = pd.merge(results_candidates, people, how='left', on='person_id') # results-candidates-people
# Determine who were incumbent candidates at the time of the election
    election_date = pytz.timezone('America/New_York').localize(datetime(2020, 11, 3))  # localize() avoids the LMT offset pitfall of passing a pytz zone via tzinfo
commissioners = list_commissioners(status=None)
incumbents = commissioners[(commissioners.start_date < election_date) & (election_date < commissioners.end_date)]
incumbent_candidates = pd.merge(incumbents, candidates, how='inner', on='person_id')
incumbent_candidates['is_incumbent'] = True
rcp = pd.merge(rcp, incumbent_candidates[['candidate_id', 'is_incumbent']], how='left', on='candidate_id')
rcp['is_incumbent'] = rcp['is_incumbent'].fillna(False)
    # Sort by SMD ascending, Votes descending
rcp = rcp.sort_values(by=['smd_id', 'votes'], ascending=[True, False])
# Placeholder name for all write-in candidates.
# We do not know the combination of name and vote count for write-in candidates
# We only know the name of the write-in winners
rcp['full_name'] = rcp['full_name'].fillna('Write-ins combined')
rcp['write_in_winner_int'] = rcp['write_in_winner'].astype(int)
return rcp
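# Illustrative helper (not part of the original site build): relies on the sort order set in
# build_results_candidate_people() above (votes descending within each SMD) to pick each
# SMD's top vote-getter.
def example_top_votegetter_per_smd():
    rcp = build_results_candidate_people()
    return rcp.groupby('smd_id').head(1)[['smd_id', 'full_name', 'votes']]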
def build_district_comm_commelect():
"""
Build DataFrame showing commissioner and commissioner-elect for every district
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status=None)
people = pd.read_csv('data/people.csv')
cp = pd.merge(commissioners, people, how='inner', on='person_id')
# left join to both current commissioners and commissioners-elect
cp_current = pd.merge(districts, cp.loc[cp['is_current'], ['smd_id', 'person_id', 'full_name']], how='left', on='smd_id')
cp_current = cp_current.rename(columns={'full_name': 'current_commissioner', 'person_id': 'current_person_id'})
cp_current_future = pd.merge(cp_current, cp.loc[cp['is_future'], ['smd_id', 'person_id', 'full_name']], how='left', on='smd_id')
cp_current_future = cp_current_future.rename(columns={'full_name': 'commissioner_elect', 'person_id': 'future_person_id'})
# If there is not a current commissioner for the SMD, mark the row as "vacant"
cp_current_future['current_commissioner'] = cp_current_future['current_commissioner'].fillna('(vacant)')
return cp_current_future
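# Illustrative helper (not part of the original site build): counts seats that
# build_district_comm_commelect() above marks with the '(vacant)' placeholder.
def example_count_vacant_smds():
    dcc = build_district_comm_commelect()
    return int((dcc['current_commissioner'] == '(vacant)').sum())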
def build_smd_html_table(list_of_smds, link_path=''):
"""
Return an HTML table with one row per district for a given list of SMDs
Contains current commissioner and all candidates with number of votes
"""
rcp = build_results_candidate_people()
# Bold the winners in this text field
# results_field = 'Candidates and Results (Winner in Bold)'
# rcp[results_field] = rcp.apply(
# lambda row:
# '<strong>{} ({:,.0f} votes)</strong>'.format(row['full_name'], row['votes'])
# if row['winner']
# else '{} ({:,.0f} votes)'.format(row['full_name'], row['votes'])
# , axis=1
# )
results_field = 'Candidates and Results'
rcp[results_field] = rcp.apply(
lambda row: '{} ({:,.0f} votes)'.format(row['full_name'], row['votes'])
, axis=1
)
# Aggregate results by SMD
district_results = rcp.groupby('smd_id').agg({
'votes': sum
, results_field: lambda x: ', '.join(x)
, 'write_in_winner_int': sum
})
total_votes_display_name = 'ANC Votes'
district_results[total_votes_display_name] = district_results['votes']
max_votes_for_bar_chart = district_results[total_votes_display_name].max()
district_comm_commelect = build_district_comm_commelect()
dcp_results = pd.merge(district_comm_commelect, district_results, how='left', on='smd_id')
display_df = dcp_results[dcp_results['smd_id'].isin(list_of_smds)].copy()
display_df['SMD'] = (
f'<a href="{link_path}' + display_df['smd_id'].str.replace('smd_','').str.lower() + '.html">'
+ display_df['smd_id'].str.replace('smd_','') + '</a>'
)
display_df['Current Commissioner'] = display_df['current_commissioner']
display_df['Commissioner-Elect'] = display_df['commissioner_elect']
# Append "write-in" to Commissioners-Elect who were write-in candidates
display_df.loc[display_df['write_in_winner_int'] == 1, 'Commissioner-Elect'] = display_df.loc[display_df['write_in_winner_int'] == 1, 'Commissioner-Elect'] + ' (write-in)'
columns_to_html = ['SMD', 'Current Commissioner']
css_uuid = hashlib.sha224(display_df[columns_to_html].to_string().encode()).hexdigest() + '_'
html = (
display_df[columns_to_html]
.fillna('')
.style
# .set_properties(
# subset=[results_field]
# , **{
# 'text-align': 'left'
# , 'width': '700px'
# , 'height': '45px'
# }
# )
# .set_properties(
# subset=[total_votes_display_name]
# , **{'text-align': 'left'}
# )
.set_properties(
subset=['Current Commissioner']
, **{'width': '230px', 'text-align': 'left'} # 230px fits the longest commissioner name on one row
) # why is the width in pixels so different between these columns?
# .format({total_votes_display_name: '{:,.0f}'})
# .bar(
# subset=[total_votes_display_name]
# , color='#cab2d6' # light purple
# , vmin=0
# , vmax=3116
# )
.set_uuid(css_uuid)
.hide_index()
.render()
)
return html
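# Illustrative usage sketch (not part of the original site build): renders the table for a
# handful of districts and writes it to disk. The SMD ids and the output path are hypothetical.
def example_render_smd_table(smd_ids=('smd_1A01', 'smd_1A02'), out_path='example_table.html'):
    html_table = build_smd_html_table(list(smd_ids), link_path='districts/')
    with open(out_path, 'w') as f:
        f.write(html_table)
    return out_path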
def build_smd_html_table_candidates(list_of_smds, link_path=''):
"""
Return an HTML table with one row per district for a given list of SMDs
Contains current commissioner and all candidates by status
"""
districts = pd.read_csv('data/districts.csv')
commissioners = list_commissioners(status='current')
people = pd.read_csv('data/people.csv')
candidates = pd.read_csv('data/candidates.csv')
candidate_statuses = | pd.read_csv('data/candidate_statuses.csv') | pandas.read_csv |
###############################################
# #
# Interfacing with Excel Module to build DSM #
# #
# Contrib: uChouinard #
# V0 03/03/2019 #
# #
###############################################
import DSM as dsmx
import SystemDependencies as dpx
import DependencyIndex as dpi
import pandas as pds
from openpyxl import load_workbook
#purely Based on Negative Dependencies DSM
class NDI_Interfacer:
def __init__(self, input_filename, output_filename=''):
        self.input_filename = input_filename
        # Default the output file to the input file when no output name is given
        self.output_filename = output_filename if output_filename != '' else input_filename
        self.sys = dpx.System('')
def dsmBuilder(self):
#Fetching Relevant Affecting/Affected Info
df= | pds.read_excel(self.input_filename, 'Input_Level') | pandas.read_excel |
from itertools import product
import pytest
import numpy as np
import pandas as pd
import iguanas.rule_scoring.rule_scoring_methods as rsm
import iguanas.rule_scoring.rule_score_scalers as rss
from iguanas.rule_scoring import RuleScorer
from iguanas.metrics.classification import Precision
@pytest.fixture
def create_data():
np.random.seed(0)
X_rules = pd.DataFrame({
'A': np.random.randint(0, 2, 1000),
'B': np.random.randint(0, 2, 1000),
'C': np.random.randint(0, 2, 1000),
})
y = pd.Series(np.random.randint(0, 2, 1000))
weights = (y + 1) * 2
return X_rules, y, weights
@pytest.fixture
def expected_results():
expected_results = {
('LR', 'MMS(-100, -10)', 'No weights'): pd.Series({'A': -10.0, 'B': -100.0, 'C': -32.0}),
('LR', 'MMS(100, 10)', 'No weights'): pd.Series({'A': 10.0, 'B': 100.0, 'C': 32.0}),
('LR', 'CS(-100)', 'No weights'): pd.Series({'A': -69.0, 'B': -100.0, 'C': -77.0}),
('LR', 'CS(100)', 'No weights'): pd.Series({'A': 69.0, 'B': 100.0, 'C': 77.0}),
('PS', 'MMS(-100, -10)', 'No weights'): pd.Series({'A': -10.0, 'B': -100.0, 'C': -36.0}),
('PS', 'MMS(100, 10)', 'No weights'): pd.Series({'A': 10.0, 'B': 100.0, 'C': 36.0}),
('PS', 'CS(-100)', 'No weights'): pd.Series({'A': -91.0, 'B': -100.0, 'C': -94.0}),
('PS', 'CS(100)', 'No weights'): pd.Series({'A': 91.0, 'B': 100.0, 'C': 94.0}),
('RFS', 'MMS(-100, -10)', 'No weights'): pd.Series({'A': -100.0, 'B': -26.0, 'C': -10.0}),
('RFS', 'MMS(100, 10)', 'No weights'): pd.Series({'A': 100.0, 'B': 26.0, 'C': 10.0}),
('RFS', 'CS(-100)', 'No weights'): pd.Series({'A': -100.0, 'B': -73.0, 'C': -68.0}),
('RFS', 'CS(100)', 'No weights'): pd.Series({'A': 100.0, 'B': 73.0, 'C': 68.0}),
('LR', 'MMS(-100, -10)', 'Weights'): pd.Series({'A': -10, 'B': -100, 'C': -31}),
('LR', 'MMS(100, 10)', 'Weights'): pd.Series({'A': 10, 'B': 100, 'C': 31}),
('LR', 'CS(-100)', 'Weights'): pd.Series({'A': -69, 'B': -100, 'C': -76}),
('LR', 'CS(100)', 'Weights'): | pd.Series({'A': 69, 'B': 100, 'C': 76}) | pandas.Series |
# This file contains functions that complete dataset with missing entries
import time
import inspect
import pandas as pd
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401 -- enables IterativeImputer
from sklearn.impute import KNNImputer, IterativeImputer
from utils.data import *
# Method 1
def complete_by_value(data, value=0, print_time=False):
"""
Replace NaN with `value` passed as argument
"""
if print_time:
tt = time.process_time()
data = data.copy()
data_protected = data.X[data.protected_features].copy()
data_unprotected = data.X.drop(columns=data.protected_features).copy()
data_unprotected = data_unprotected.fillna(value).astype(data.types.drop(data.protected_features))
data.X = pd.concat([data_unprotected, data_protected], axis=1)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
# Method 2
def complete_by_mean_col(data, print_time=False):
"""
Fill missing entries using the mean of that column
"""
if print_time:
tt = time.process_time()
data = data.copy()
data_protected = data.X[data.protected_features].copy()
data_unprotected = data.X.drop(columns=data.protected_features).copy()
data_unprotected = data_unprotected.fillna(data_unprotected.mean()).astype(data.types.drop(data.protected_features))
data.X = pd.concat([data_unprotected, data_protected], axis=1)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
# Method 2 version 2
def complete_by_mean_col_v2(data, print_time=False, target_feature=None):
"""
Fill missing entries using the mean of the column from opposite group (defined by `target_feature`)
For example, entries for `race`="African-American" will be imputed from rows whose `race` is not "African-American"
"""
if print_time:
tt = time.process_time()
data = data.copy()
if target_feature:
assert target_feature in data.protected_features
target_unique_values = data.X[target_feature].unique().tolist()
assert len(target_unique_values) > 0
if len(target_unique_values) < 2:
print("Warning: complete_by_mean_col_v2: only one unique value found for target feature")
return complete_by_mean_col(data, print_time=print_time)
imputed_parts = []
for value in target_unique_values:
data_train = data.X[data.X[target_feature] != value].drop(columns=data.protected_features).copy()
data_protected = data.X[data.X[target_feature] == value][data.protected_features].copy()
data_unprotected = data.X[data.X[target_feature] == value].drop(columns=data.protected_features).copy()
data_unprotected = data_unprotected.fillna(data_train.mean()).astype(data.types.drop(data.protected_features))
imputed_parts.append(pd.concat([data_unprotected, data_protected], axis=1))
data_X = imputed_parts[0]
idx = 1
while idx < len(imputed_parts):
data_X = pd.concat([data_X, imputed_parts[idx]], axis=0)
idx += 1
assert data_X.shape == data.X.shape
data.X = data_X.sort_index()
else:
print("Warning: You're using V2 mean imputation, but didn't set a value for target_feature. Will perform V1.")
return complete_by_mean_col(data, print_time=print_time)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
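# Illustrative comparison sketch (not part of the original module): V1 fills each column with
# its overall mean, while V2 fills each group's rows using the mean computed from the other
# group(s). 'data' is assumed to be a Dataset object and 'race' one of its protected features
# (the feature name is borrowed from the docstring above).
def example_mean_imputation_variants(data):
    plain = complete_by_mean_col(data)
    group_aware = complete_by_mean_col_v2(data, target_feature='race')
    return plain, group_aware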
# Method 3
def complete_by_nearby_row(data, print_time=False):
"""
Fill the missing entries by nearby values
"""
if print_time:
tt = time.process_time()
data = data.copy()
data_protected = data.X[data.protected_features].copy()
data_unprotected = data.X.drop(columns=data.protected_features).copy()
data_unprotected = data_unprotected.fillna(method="ffill")
data_unprotected = data_unprotected.fillna(method="bfill").astype(data.types.drop(data.protected_features))
data.X = pd.concat([data_unprotected, data_protected], axis=1)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
# Method 4
def complete_by_similar_row(data, print_time=False, K=5):
"""
Fill the missing entries by values from most similar rows, found by KNN
"""
if print_time:
tt = time.process_time()
data = data.copy()
data_protected = data.X[data.protected_features].copy()
data_unprotected = data.X.drop(columns=data.protected_features).copy()
imputer = KNNImputer(n_neighbors=K, weights="uniform") # by default use euclidean distance
    data_unprotected = pd.DataFrame(imputer.fit_transform(data_unprotected), columns=data_unprotected.columns, index=data_unprotected.index).astype(data.types.drop(data.protected_features))
data.X = pd.concat([data_unprotected, data_protected], axis=1)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
# Method 4 version 2
def complete_by_similar_row_v2(data, print_time=False, K=5, target_feature=None):
"""
Fill the missing entries by values from most similar rows, found by KNN
KNN is fit on opposite group data, where opposite group is defined by `target_feature`
For example, entries for `race`="African-American" will be imputed from rows whose `race` is not "African-American"
"""
if print_time:
tt = time.process_time()
data = data.copy()
if target_feature:
assert target_feature in data.protected_features
target_unique_values = data.X[target_feature].unique().tolist()
assert len(target_unique_values) > 0
if len(target_unique_values) < 2:
print("Warning: complete_by_similar_row_v2: only one unique value found for target feature")
return complete_by_similar_row(data, print_time=print_time, K=K)
imputed_parts = []
for value in target_unique_values:
imputer = KNNImputer(n_neighbors=K, weights="uniform")
data_train = data.X[data.X[target_feature] != value].drop(columns=data.protected_features).copy()
imputer.fit(data_train)
data_protected = data.X[data.X[target_feature] == value][data.protected_features].copy()
data_unprotected = data.X[data.X[target_feature] == value].drop(columns=data.protected_features).copy()
data_unprotected = pd.DataFrame(imputer.transform(data_unprotected), columns=data_unprotected.columns, index=data_unprotected.index).astype(data.types.drop(data.protected_features))
imputed_parts.append(pd.concat([data_unprotected, data_protected], axis=1))
data_X = imputed_parts[0]
idx = 1
while idx < len(imputed_parts):
data_X = pd.concat([data_X, imputed_parts[idx]], axis=0)
idx += 1
assert data_X.shape == data.X.shape
data.X = data_X.sort_index()
else:
print("Warning: You're using V2 similar imputation, but didn't set a value for target_feature. Will perform V1.")
return complete_by_similar_row(data, print_time=print_time, K=K)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
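# Illustrative sketch of the group-aware idea above: fit a KNN imputer on rows from the
# *opposite* group and use it to fill the current group's gaps. The toy frame and column
# names below are hypothetical.
def _demo_opposite_group_knn():
    import numpy as np
    import pandas as pd
    from sklearn.impute import KNNImputer
    X = pd.DataFrame({
        'group': ['a', 'a', 'b', 'b', 'b'],
        'f1': [1.0, np.nan, 3.0, 4.0, 5.0],
        'f2': [10.0, 12.0, np.nan, 16.0, 18.0],
    })
    parts = []
    for value in X['group'].unique():
        # Fit on the rows that do NOT belong to `value`, transform the rows that do
        imputer = KNNImputer(n_neighbors=2, weights="uniform")
        train = X.loc[X['group'] != value, ['f1', 'f2']]
        imputer.fit(train)
        part = X.loc[X['group'] == value, ['f1', 'f2']]
        filled = pd.DataFrame(imputer.transform(part), columns=part.columns, index=part.index)
        parts.append(pd.concat([filled, X.loc[X['group'] == value, ['group']]], axis=1))
    return pd.concat(parts).sort_index()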
# Method 5
def complete_by_most_freq(data, print_time=False):
"""
Fill the missing entries by the most frequent value from that column
"""
if print_time:
tt = time.process_time()
data = data.copy()
data_protected = data.X[data.protected_features].copy()
data_unprotected = data.X.drop(columns=data.protected_features).copy()
data_unprotected.fillna(data_unprotected.mode().iloc[0], inplace=True)
data.X = pd.concat([data_unprotected, data_protected], axis=1)
if print_time:
print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
return data
# Method 6
def complete_by_multi(data, print_time=False, num_outputs=10, verbose=0):
"""
Fill the missing entries by running multiple imputation (MICE)
Return a list of `Dataset` objects instead of single object
"""
if print_time:
tt = time.process_time()
data_new = []
imputer = IterativeImputer(max_iter=1, sample_posterior=True, verbose=verbose)
for _ in range(num_outputs):
data_copy = data.copy()
data_protected = data_copy.X[data_copy.protected_features].copy()
data_unprotected = data_copy.X.drop(columns=data_copy.protected_features).copy()
data_unprotected = pd.DataFrame(imputer.fit_transform(data_unprotected), columns=data_unprotected.columns).astype(data.types.drop(data.protected_features))
        data_copy.X = pd.concat([data_unprotected, data_protected], axis=1)
        data_new.append(data_copy)
    if print_time:
        print("Performance Monitor: ({:.4f}s) ".format(time.process_time() - tt) + inspect.stack()[0][3])
    return data_new
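# Illustrative sketch of multiple imputation on a plain frame (toy data, no Dataset wrapper):
# each draw samples from the posterior, so the completed frames differ slightly.
def _demo_multiple_imputation(num_outputs=3):
    import numpy as np
    import pandas as pd
    from sklearn.experimental import enable_iterative_imputer  # noqa: F401
    from sklearn.impute import IterativeImputer
    X = pd.DataFrame({'f1': [1.0, 2.0, np.nan, 4.0], 'f2': [2.0, np.nan, 6.0, 8.0]})
    imputer = IterativeImputer(max_iter=1, sample_posterior=True)
    # Each fit_transform samples missing values anew, giving several plausible completions
    return [pd.DataFrame(imputer.fit_transform(X), columns=X.columns) for _ in range(num_outputs)]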
# -*- coding: utf-8 -*-
"""
Library for demonstrating simple collaborative filtering
@author: <NAME>
"""
import os
import math
import numpy as np
import pandas as pd
import time
from statistics import mean
from math import sqrt
# convert the transaction data (long data) into a ratings matrix (wide data)
# assume the first two columns are user and item names (which may be strings or integers and may not be contiguous)
# also generate two lookup tables to map user and item names into indexes for accessing the ratings matrix
def makeratingsmatrix(trans):
trans = trans.iloc[:,0:3] # keep only first 3 columns
trans.columns = ['user','item','rating']
# create the mappings between user and item names (as in raw data) and the matrix row and column indexes
unames = np.sort(trans['user'].unique())
inames = np.sort(trans['item'].unique())
umap = dict(zip(unames,[i for i in range(len(unames))]))
imap = dict(zip(inames,[i for i in range(len(inames))]))
    # create the ratings matrix, use average if multiple ratings exist for same (user,item)
    #users = trans.pivot(index='user', columns='item', values='rating').values # fast, but no averaging, rows & cols in alphanumeric order
users = pd.pivot_table(trans, index=['user'], columns=['item'], values=['rating'],aggfunc=[mean]).values # slower
return [users, umap, imap]
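# Example usage on a hypothetical transaction log: build a small ratings matrix and look up
# one (user, item) cell through the two index maps.
def _demo_makeratingsmatrix():
    trans = pd.DataFrame({
        'user': ['u1', 'u1', 'u2', 'u2', 'u3'],
        'item': ['i1', 'i2', 'i1', 'i3', 'i2'],
        'rating': [4, 3, 5, 2, 1],
    })
    users, umap, imap = makeratingsmatrix(trans)
    # users is an (n_users x n_items) array with NaN for unrated items
    return users[umap['u2'], imap['i1']]  # expected 5.0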
def head(arr,r=10,c=10):
nr, nc = arr.shape
with np.printoptions(threshold=np.inf):
if type(arr) == np.ndarray:
print(arr[0:min(r,nr),0:min(c,nc)])
else:
print(arr.iloc[0:min(r,nr),0:min(c,nc)])
def sparsity(arr):
return float(np.isnan(arr).sum()*100)/np.prod(arr.shape)
#return (1.0 - ( count_nonzero(arr) / float(arr.size) ))
def wtavg(vals, weights):
xy = vals * weights
weights = weights[np.isnan(xy) == False]
#if len(weights) == 0 : return np.nan
if sum(weights) == 0 : return np.nan
vals = vals[np.isnan(xy)==False]
return sum(vals * weights)/sum(weights)
def pearsonsim(x,y):
xy = x*y
x = x[np.isnan(xy)==False]
y = y[np.isnan(xy)==False]
if(len(x)==0): return np.nan
mx=mean(x)
my=mean(y)
rt = sqrt(sum((x-mx)**2)*sum((y-my)**2))
if (rt == 0): return np.nan #math.isnan(rt)==True or
return sum((x-mx)*(y-my))/rt
def cosinesim(x,y):
xy = x*y
x = x[np.isnan(xy)==False]
y = y[np.isnan(xy)==False]
if(len(x)==0): return np.nan
rt = sqrt(sum(x**2)*sum(y**2))
return sum(x*y)/rt
def euclidsim(x,y):
xy = x*y
x = x[np.isnan(xy)==False]
y = y[np.isnan(xy)==False]
z=(y-x)**2
sz=sqrt(sum(z))
return 1/(1+sz)
def euclidsimF(x,y):
xy = x*y
x = x[np.isnan(xy)==False]
y = y[np.isnan(xy)==False]
z=(y-x)**2
return 1/(1+sum(z))
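# Quick comparison of the similarity measures on two invented rating vectors; NaNs mark
# items that only one of the two users has rated.
def _demo_similarities():
    x = np.array([5.0, 3.0, np.nan, 1.0])
    y = np.array([4.0, np.nan, 2.0, 1.0])
    # Each measure silently restricts itself to the co-rated items (positions 0 and 3 here)
    return {'pearson': pearsonsim(x, y), 'cosine': cosinesim(x, y), 'euclid': euclidsim(x, y)}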
def getitemsimsmatrix(ratsmatrix,simfun):
r,c = ratsmatrix.shape
matrx = list([])
for col1 in range(0,c):
simrow = [0]*col1
for col2 in range(col1,c):
simrow.append(simfun(ratsmatrix[:,col1],ratsmatrix[:,col2]))
matrx.append(simrow)
matrx = np.array(matrx)
matrx = matrx + matrx.T - np.diag(np.diag(matrx))
return matrx
def predictrating_UU(targetrats, ratsmatrix, targetitemindx, simfun):
return predictratings_UU(targetrats, ratsmatrix, doitems=[targetitemindx], simfun=simfun)[0]
def predictratings_UU(targetrats, ratsmatrix, doitems, simfun=pearsonsim):
sims = list([])
for row in ratsmatrix: sims.append(simfun(row,targetrats))
sims = np.array(sims)
with np.errstate(invalid='ignore'): sims[sims < 0] = np.nan
rats = list([])
for col in doitems: rats.append(wtavg(ratsmatrix[:,col],sims)) # assumes target rating is NA (if target in usersA)
return np.array(rats)
def predictrating_II(targetrats, itemsims, targetitemid):
return predictratings_II(targetrats, itemsims, doitems=[targetitemid])[0]
def predictratings_II(targetrats,itemsims,doitems):
seenitems = np.isnan(targetrats)==False
rats = list([])
for row in doitems:
rats.append(wtavg(targetrats[seenitems],itemsims[row,seenitems]))
return np.array(rats)
def getRecommendations_UU(targetrats, ratsmatrix, imap, simfun=pearsonsim,topN=5):
itemnames=list(imap.keys())
unseenitemids = np.where(np.isnan(targetrats)==True)[0]
ratsA = predictratings_UU(targetrats, ratsmatrix, doitems=unseenitemids, simfun=simfun)
    rats = pd.DataFrame(ratsA, index=[itemnames[i] for i in unseenitemids], columns=['predrating'])
    rats = rats.sort_values('predrating', ascending=False)
    return rats.head(topN)
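# End-to-end toy walk-through with invented ratings: build the ratings matrix, then ask for
# user-user recommendations for a new rating vector.
def _demo_recommendations():
    trans = pd.DataFrame({
        'user': ['u1', 'u1', 'u1', 'u2', 'u2', 'u3', 'u3'],
        'item': ['i1', 'i2', 'i3', 'i1', 'i3', 'i2', 'i3'],
        'rating': [5, 3, 4, 4, 5, 2, 3],
    })
    users, umap, imap = makeratingsmatrix(trans)
    # A new user rated i1 and i3 but not i2; predict the unseen item(s)
    targetrats = np.array([5.0, np.nan, 4.0])
    return getRecommendations_UU(targetrats, users, imap, simfun=pearsonsim, topN=2)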
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')
)
@njit
def min_and_max_nb(col, a):
out = np.empty(2)
out[0] = np.nanmin(a)
out[1] = np.nanmax(a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True, group_by=group_by,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])
)
@njit
def argmin_and_argmax_nb(col, a):
# nanargmin and nanargmax
out = np.empty(2)
_a = a.copy()
_a[np.isnan(_a)] = np.inf
out[0] = np.argmin(_a)
_a = a.copy()
_a[np.isnan(_a)] = -np.inf
out[1] = np.argmax(_a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='C', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='F', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
def test_squeeze_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),
pd.DataFrame([
[1.0, 1.0],
[3.0, 2.0],
[3.0, np.nan],
[3.0, 2.0],
[1.0, 1.0]
], index=df.index, columns=['g1', 'g2'])
)
def test_flatten_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='C'),
pd.DataFrame([
[1.0, 1.0],
[np.nan, np.nan],
[2.0, 2.0],
[4.0, np.nan],
[3.0, np.nan],
[3.0, np.nan],
[4.0, 2.0],
[2.0, np.nan],
[np.nan, 1.0],
[1.0, np.nan]
], index=np.repeat(df.index, 2), columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='F'),
pd.DataFrame([
[1.0, 1.0],
[2.0, 2.0],
[3.0, np.nan],
[4.0, 2.0],
[np.nan, 1.0],
[np.nan, np.nan],
[4.0, np.nan],
[3.0, np.nan],
[2.0, np.nan],
[1.0, np.nan]
], index=np.tile(df.index, 2), columns=['g1', 'g2'])
)
@pytest.mark.parametrize(
"test_name,test_func,test_func_nb",
[
('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),
('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),
('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),
('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),
('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),
('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),
('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)
],
)
def test_funcs(self, test_name, test_func, test_func_nb):
# numeric
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack()),
test_func(df['c'])
], index=['g1', 'g2']).rename(test_name)
)
np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))
pd.testing.assert_series_equal(
test_func(df.vbt, wrap_kwargs=dict(time_units=True)),
test_func(df).rename(test_name) * day_dt
)
# boolean
bool_ts = df == df
assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])
pd.testing.assert_series_equal(
test_func(bool_ts.vbt),
test_func(bool_ts).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),
test_func(bool_ts).rename(test_name) * day_dt
)
@pytest.mark.parametrize(
"test_name,test_func",
[
('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),
('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))
],
)
def test_arg_funcs(self, test_name, test_func):
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack())[0],
test_func(df['c'])
], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)
)
def test_describe(self):
pd.testing.assert_series_equal(
df['a'].vbt.describe(),
df['a'].describe()
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=None),
df.describe(percentiles=None)
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=[]),
df.describe(percentiles=[])
)
test_against = df.describe(percentiles=np.arange(0, 1, 0.1))
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),
test_against
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),
pd.DataFrame({
'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,
'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values
}, index=test_against.index)
)
def test_drawdown(self):
pd.testing.assert_series_equal(
df['a'].vbt.drawdown(),
df['a'] / df['a'].expanding().max() - 1
)
pd.testing.assert_frame_equal(
df.vbt.drawdown(),
df / df.expanding().max() - 1
)
def test_drawdowns(self):
assert type(df['a'].vbt.drawdowns) is vbt.Drawdowns
assert df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq
assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim
assert df.vbt.drawdowns.wrapper.ndim == df.ndim
def test_to_mapped_array(self):
np.testing.assert_array_equal(
df.vbt.to_mapped_array().values,
np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().col_arr,
np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().idx_arr,
np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).values,
np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).col_arr,
np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).idx_arr,
np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
)
def test_zscore(self):
pd.testing.assert_series_equal(
df['a'].vbt.zscore(),
(df['a'] - df['a'].mean()) / df['a'].std(ddof=0)
)
pd.testing.assert_frame_equal(
df.vbt.zscore(),
(df - df.mean()) / df.std(ddof=0)
)
def test_split(self):
splitter = TimeSeriesSplit(n_splits=2)
(train_df, train_indexes), (test_df, test_indexes) = df['a'].vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[np.nan, 4.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
(train_df, train_indexes), (test_df, test_indexes) = df.vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 1.0, np.nan, 1.0],
[2.0, 4.0, 2.0, 2.0, 4.0, 2.0],
[3.0, 3.0, np.nan, 3.0, 3.0, np.nan],
[np.nan, np.nan, np.nan, 4.0, 2.0, 2.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.MultiIndex.from_tuples([
(0, 'a'),
(0, 'b'),
(0, 'c'),
(1, 'a'),
(1, 'b'),
(1, 'c')
], names=['split_idx', None])
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, 2.0, 2.0, np.nan, 1.0, 1.0]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.MultiIndex.from_tuples([
(0, 'a'),
(0, 'b'),
(0, 'c'),
(1, 'a'),
(1, 'b'),
(1, 'c')
], names=['split_idx', None])
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
def test_range_split(self):
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(n=2)[0],
pd.DataFrame(
np.array([
[1., 4.],
[2., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(n=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=2)[0],
pd.DataFrame(
np.array([
[1., 2., 3., 4.],
[2., 3., 4., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1, 2, 3], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-02', '2018-01-03'], dtype='datetime64[ns]', name='split_1', freq=None),
pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_2', freq=None),
pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_3', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
df['a'].vbt.range_split(range_len=2)[1][i],
target[i]
)
pd.testing.assert_frame_equal(
df['a'].vbt.range_split(range_len=2, n=3)[0],
pd.DataFrame(
np.array([
[1., 3., 4.],
[2., 4., np.nan]
]),
index=pd.RangeIndex(start=0, stop=2, step=1),
columns=pd.Int64Index([0, 1, 2], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02'], dtype='datetime64[ns]', name='split_0', freq=None),
            pd.DatetimeIndex(['2018-01-03', '2018-01-04'], dtype='datetime64[ns]', name='split_1', freq=None),
            pd.DatetimeIndex(['2018-01-04', '2018-01-05'], dtype='datetime64[ns]', name='split_2', freq=None)
        ]
        for i in range(len(target)):
            pd.testing.assert_index_equal(
                df['a'].vbt.range_split(range_len=2, n=3)[1][i],
                target[i]
            )
import os
import numpy as np
import pandas as pd
from collections import defaultdict
from .io import save_data, load_data, exists_data, save_results
from . import RAW_DATA_DIR
DATASETS = ['password', 'keypad', 'fixed_text', 'free_text', 'mobile']
MOBILE_SENSORS = ['pressure', 'tool_major', 'x', 'x_acceleration', 'x_rotation', 'y', 'y_acceleration',
'y_rotation', 'z_acceleration', 'z_rotation']
SUMMARY_COLS = ['Users', 'Samples/user', 'Total events', 'Min events/user', 'Max events/user',
'Events/sample', 'Mean user freq', 'Mean user period', 'Global freq', 'Mean global period']
KEYGROUP = {
# 'backspace': 'backspace',
'space': 'space',
'shift': 'shift',
'period': 'period',
'comma': 'comma'
}
for k in list('<KEY>'):
KEYGROUP[k] = 'left_letter'
for k in list('<KEY>'):
KEYGROUP[k] = 'right_letter'
FEATURE_FUNS = defaultdict(lambda: lambda df, col: df[col])
FEATURE_FUNS.update({
# Timing features
'tau': lambda df, col: df['timepress'].diff().fillna(np.median(df['timepress'].diff().dropna())),
'duration': lambda df, col: df['timerelease'] - df['timepress'],
# Event type functions
'none': lambda df, col: np.ones(len(df)),
'keygroup': lambda df, col: df['keyname'].map(KEYGROUP).fillna('other'),
'position': lambda df, col: np.arange(len(df)),
})
def preprocess_data(df, event_col, feature_cols):
def pp_fun(x, feature_cols=feature_cols):
x['event'] = FEATURE_FUNS[event_col](x, event_col)
for col in feature_cols:
x[col] = FEATURE_FUNS[col](x, col)
return x[['event'] + feature_cols]
if df.index.nlevels > 1:
level = np.arange(df.index.nlevels).tolist()
else:
level = 0
df = df.groupby(level=level).apply(pp_fun)
return df
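# Minimal illustration of the feature pipeline on invented timings: derive inter-key time,
# hold duration and key group for one toy session.
def _demo_preprocess_data():
    raw = pd.DataFrame({
        'keyname': ['a', 'space', 'period'],
        'timepress': [0, 150, 400],
        'timerelease': [80, 230, 470],
    }, index=pd.MultiIndex.from_tuples([(0, 0)] * 3, names=['user', 'session']))
    return preprocess_data(raw, 'keygroup', ['tau', 'duration'])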
def reduce_dataset(df, num_users=None,
min_samples=None, max_samples=None,
min_obs=None, max_obs=None):
'''
    Reducing the size of a dataset is a common operation when a certain number
of observations, samples, or users is desired. This function limits each
of these by attempting to satisfy the constraints in the following order:
num observations
num samples
num users
'''
if max_obs:
df = df.groupby(level=[0, 1]).apply(lambda x: x[:max_obs]).reset_index(level=[2, 3], drop=True)
num_obs = df.groupby(level=[0, 1]).size()
if min_obs:
num_obs = num_obs[num_obs >= min_obs]
num_samples = num_obs.groupby(level=0).size()
if min_samples:
num_samples = num_samples[num_samples >= min_samples]
if num_users and num_users < len(num_samples):
users = np.random.permutation(num_samples.index.values)[:num_users]
else:
users = num_samples.index.values
num_obs = num_obs.loc[users.tolist()]
if max_samples:
num_obs = num_obs.groupby(level=0).apply(
lambda x: x.loc[np.random.permutation(np.sort(x.index.unique()))[:max_samples]]).reset_index(level=1,
drop=True)
df = df.loc[num_obs.index].sort_index()
return df
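# Illustrative use of reduce_dataset on a tiny synthetic keystroke frame; the
# (user, session) MultiIndex follows the convention used in this module.
def _demo_reduce_dataset():
    idx = pd.MultiIndex.from_tuples(
        [(u, s) for u in range(3) for s in range(4) for _ in range(10)],
        names=['user', 'session'])
    df = pd.DataFrame({'timepress': np.arange(len(idx))}, index=idx)
    # Keep users with at least 2 samples and trim every sample to exactly 5 events
    return reduce_dataset(df, min_samples=2, min_obs=5, max_obs=5)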
def _filter(df, max_dups=4, max_pause=6e4):
# Drop sessions with many duplicate times
s = df.reset_index().groupby(['user', 'session', 'timepress']).size()
s = s[s > max_dups].reset_index(level=2, drop=True)
dropme = s.index.unique()
df = df.drop(dropme)
# Drop sessions with more than 5 minute pauses
s = df.groupby(level=[0, 1]).apply(lambda x: np.any(x['timepress'].diff() > max_pause))
s = s[s]
dropme = s.index.unique()
df = df.drop(dropme)
# Drop 0 durations
df = df[df['timerelease'] - df['timepress'] > 0]
# Separate duplicate key presses by at least 1 ms
while np.any(df.groupby(level=[0, 1]).apply(lambda x: x['timepress'].diff() == 0)):
def _inc_timepress_dups(x):
idx = x['timepress'].diff().fillna(1) == 0
x.loc[idx, 'timepress'] += 1
x.loc[idx, 'timerelease'] += 1
return x.reset_index()
df = df.groupby(level=[0, 1]).apply(_inc_timepress_dups).set_index(['user', 'session'])
df = df.groupby(level=[0, 1]).apply(lambda x: x.sort_values('timepress'))
return df
def _normalize(df):
def norm_session_times(x):
t0 = x.iloc[0]['timepress']
x['timepress'] -= t0
x['timerelease'] -= t0
return x
df = df.groupby(level=[0, 1]).apply(norm_session_times)
df = df.reset_index()
df['user'] = df['user'].map(dict(zip(df['user'].unique(), range(len(df['user'].unique())))))
def renumber_sessions(x):
x['session'] = x['session'].map(dict(zip(sorted(x['session'].unique()), range(len(x['session'].unique())))))
return x
df = df.groupby('user').apply(renumber_sessions).set_index(['user', 'session'])
df = df.sort_index()
return df
def preprocess_password(fname_in):
def process_row(idx_row):
idx, row = idx_row
timepress = 1000 * np.r_[0, row[4::3].astype(float).values].cumsum()
timerelease = timepress + 1000 * row[3::3].astype(float).values
keyname = list('.tie5Roanl') + ['enter']
return pd.DataFrame.from_items([
('user', [row['subject']] * 11),
('session', [row['sessionIndex'] * 100 + row['rep']] * 11),
('keyname', keyname),
('timepress', timepress),
('timerelease', timerelease)
])
df = pd.concat(map(process_row, pd.read_csv(fname_in).iterrows())).set_index(['user', 'session'])
df = _normalize(df)
save_data(df, 'password')
return
def preprocess_keypad(fname_in):
df = pd.read_csv(fname_in, index_col=[0, 1])
# Discard incorrect entries
keynames = ['numpad_%s' % s for s in '9141937761'] + ['enter']
df = df.groupby(level=[0, 1]).filter(lambda x: (len(x) == 11) and (x['keyname'] == keynames).all())
df = _normalize(df)
save_data(df, 'keypad')
return
def preprocess_fixed_text(fname1_in, fname2_in, num_samples=4, num_obs=100):
df1 = pd.read_csv(fname1_in, index_col=[0, 1])
df2 = pd.read_csv(fname2_in, index_col=[0, 1])
df = pd.concat([df1[df1['inputtype'] == 'fixed'][['keyname', 'timepress', 'timerelease']],
df2[['keyname', 'timepress', 'timerelease']]])
df = _filter(df)
df = reduce_dataset(df, min_samples=num_samples, max_samples=num_samples, min_obs=num_obs, max_obs=num_obs)
df = _normalize(df)
save_data(df, 'fixed_text')
return
def preprocess_free_text(fname_in, num_samples=6, num_obs=500):
df = pd.read_csv(fname_in, index_col=[0, 1])
df = df[df['inputtype'] == 'free'][['keyname', 'timepress', 'timerelease']]
df = _filter(df)
df = reduce_dataset(df, min_samples=num_samples, max_samples=num_samples, min_obs=num_obs, max_obs=num_obs)
df = _normalize(df)
save_data(df, 'free_text')
return
def preprocess_mobile(fname_in, num_samples=20, num_users=None):
df = pd.read_csv(fname_in, index_col=[0, 1])
entities = {57: '9', 49: '1', 52: '4', 51: '3', 55: '7', 54: '6', 10: 'enter'}
df['keyname'] = df['entity'].map(entities)
df = df.dropna()
# Only correct entries
keynames = np.repeat(np.array(['9', '1', '4', '1', '9', '3', '7', '7', '6', '1', 'enter']), 2)
df = df.groupby(level=[0, 1]).filter(
lambda x: (len(x) == 22) and (x['keyname'] == keynames).all() and (x[::2]['action'] == 'press').all() and (
x[1::2]['action'] == 'release').all())
COLS = ['pressure',
'tool_major',
'x',
'x_acceleration',
'x_rotation',
'y',
'y_acceleration',
'y_rotation',
'z_acceleration',
'z_rotation']
df = df.reset_index()
press = df[df['action'] == 'press']
release = df[df['action'] == 'release']
press.columns = ['press_{}'.format(c) for c in df.columns]
release.columns = ['release_{}'.format(c) for c in df.columns]
release.index = release.index - 1
    df = pd.concat([press, release], axis=1)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
# These two lines fix matplotlib's Chinese character display problem
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
# read file
datafile = '../data/Sensitivity Analyse.xlsx'
data = pd.read_excel(datafile)
df = DataFrame(data)
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@file: plate_perspective.py
@time: 2019-09-04 13:26
"""
import pandas as pd
from datetime import datetime, timedelta
import os
import sys
sys.path.append('/Users/luoyonggui/PycharmProjects/mayiutils_n1/mayiutils/db')
from pymongo_wrapper import PyMongoWrapper
sys.path.append('/Users/luoyonggui/PycharmProjects/mayiutils_n1/mayiutils')
from email_ops import send_email
import html_ops
PROJ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
stock_series = pd.read_pickle(os.path.join(PROJ_DIR, 'finance/data/stock_dict.pkl'))
fund_otc_series = pd.read_pickle(os.path.join(PROJ_DIR, 'finance/data/fund_otc_series.pkl'))
mongo = PyMongoWrapper()
time_range = 'near_1_year'
start_date = datetime.now() - timedelta(days=365)
stock_pool = [
# '东方航空',
# '南方航空',
# '白云机场',
# '中国核电',
# '山鹰纸业',
# '晨鸣纸业',
# '中信证券',
# '华泰证券',
# '国元证券',
# '兴业银行',
# '招商银行',
# '平安银行',
# '中国平安',
# '中国人寿',
# '中国太保',
# '贵州茅台',
# '五粮液',
# '泸州老窖',
'格力电器',
'美的集团',
'福耀玻璃',
'恒瑞医药',
]
# # plate = '银行'
# # plate = '证券'
# plate = '白酒'
# # Get all stocks in this sector
# table = mongo.getCollection('finance', 'stock_basic')
# dfr = mongo.findAll(table, {'industry': plate}, returnFmt='df')
# print(dfr.shape)
# print(dfr.head())
dfr = pd.DataFrame(stock_pool, columns=['name'])
dfr['ts_code'] = dfr.name.map(lambda s: stock_series.loc[s])
# Fundamentals: area, industry, sw_industry, market cap, market cap ranking
table = mongo.getCollection('finance', 'stock_basic')
df_base = mongo.findAll(table, {'ts_code': {'$in': dfr.ts_code.tolist()}}, fieldlist=['name', 'area', 'industry', 'market'],
returnFmt='df')
# print(df)
table = mongo.getCollection('finance', 'swclass')
dft = mongo.findAll(table, {'股票代码': {'$in': dfr.ts_code.map(lambda s: s.split('.')[0]).tolist()}},
fieldlist=['股票名称', '行业名称'],
returnFmt='df')
dft.columns = ['name', 'swclass']
df_base = pd.merge(df_base, dft, on='name')
table = mongo.getCollection('finance', 'stock_daily_basic1')
dft = mongo.findAll(table, {'$and': [{'trade_date': '20190926'}, {'ts_code': {'$in': dfr.ts_code.tolist()}}]},
fieldlist=['ts_code', '市盈率(总市值/净利润)', '市净率(总市值/净资产)', '总市值', '流通市值(万元)', 'rank'],
returnFmt='df')
dft = pd.merge(dft, dfr, on='ts_code')
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import locale
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, StringIO, u
from pandas._libs.tslib import Timestamp
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
assert double_input == ujson.decode(output)
for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
with pytest.raises(expected_exception):
ujson.encode(double_input, double_precision=invalid_val)
def test_encode_string_conversion2(self):
string_input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(string_input)
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
@pytest.mark.parametrize("unicode_input", [
"Räksmörgås اسامة بن محمد بن عوض بن لادن",
"\xe6\x97\xa5\xd1\x88"
])
def test_encode_unicode_conversion(self, unicode_input):
enc = ujson.encode(unicode_input)
dec = ujson.decode(enc)
assert enc == json_unicode(unicode_input)
assert dec == json.loads(enc)
def test_encode_control_escaping(self):
escaped_input = "\x19"
enc = ujson.encode(escaped_input)
dec = ujson.decode(enc)
assert escaped_input == dec
assert enc == json_unicode(escaped_input)
def test_encode_unicode_surrogate_pair(self):
surrogate_input = "\xf0\x90\x8d\x86"
enc = ujson.encode(surrogate_input)
dec = ujson.decode(enc)
assert enc == json_unicode(surrogate_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8(self):
four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_unicode_4bytes_utf8highest(self):
four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(four_bytes_input)
dec = ujson.decode(enc)
assert enc == json_unicode(four_bytes_input)
assert dec == json.loads(enc)
def test_encode_array_in_array(self):
arr_in_arr_input = [[[[]]]]
output = ujson.encode(arr_in_arr_input)
assert arr_in_arr_input == json.loads(output)
assert output == json.dumps(arr_in_arr_input)
assert arr_in_arr_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(arr_in_arr_input),
ujson.decode(output, numpy=True))
@pytest.mark.parametrize("num_input", [
31337,
-31337, # Negative number.
-9223372036854775808 # Large negative number.
])
def test_encode_num_conversion(self, num_input):
output = ujson.encode(num_input)
assert num_input == json.loads(output)
assert output == json.dumps(num_input)
assert num_input == ujson.decode(output)
def test_encode_list_conversion(self):
list_input = [1, 2, 3, 4]
        output = ujson.encode(list_input)
        assert list_input == json.loads(output)
        assert list_input == ujson.decode(output)
        tm.assert_numpy_array_equal(np.array(list_input),
                                    ujson.decode(output, numpy=True))
import sys
import pandas as pd
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import f1_score
from sklearn.model_selection import RepeatedStratifiedKFold, GridSearchCV
from sklearn.pipeline import Pipeline
# Use cross-validation.
VALIDATING = False
# Find optimal model parameters using grid search.
GRIDSEARCH = False
if GRIDSEARCH:
import numpy as np
# Imputation strategy, use one of the following.
IMPUTE = None
# IMPUTE = 'mean'
# IMPUTE = 'median'
FTR_YEAR = 'year'
FTR_AGE = 'age'
FTR_MARITL = 'maritl'
FTR_RACE = 'race'
FTR_EDU = 'education'
FTR_JOB = 'jobclass'
FTR_HEALTH = 'health'
FTR_INS = 'health_ins'
FTR_WAGE = 'wage'
def handle_missing_values(data):
if not IMPUTE:
return data.dropna()
else:
# Drop samples with missing label, instead of imputing it:
data = data.loc[data[FTR_RACE].isin([0.0, 1.0, 2.0, 3.0])]
imputer = preprocessing.Imputer(strategy=IMPUTE, axis=0)
imputed = imputer.fit_transform(data)
return pd.DataFrame(imputed, columns=data.columns)
def encode_data(data):
data[FTR_MARITL] = data[FTR_MARITL].map(
{'1. Never Married': 0, '2. Married': 1, '3. Widowed': 2, '4. Divorced': 3, '5. Separated': 4})
data[FTR_RACE] = data[FTR_RACE].map(
{'1. White': 0, '2. Black': 1, '3. Asian': 2, '4. Other': 3})
data[FTR_EDU] = data[FTR_EDU].map(
{'1. < HS Grad': 0, '2. HS Grad': 1, '3. Some College': 2, '4. College Grad': 3, '5. Advanced Degree': 4})
data[FTR_JOB] = data[FTR_JOB].map(
{'1. Industrial': 0, '2. Information': 1})
data[FTR_HEALTH] = data[FTR_HEALTH].map(
{'1. <=Good': 0, '2. >=Very Good': 1})
data[FTR_INS] = data[FTR_INS].map(
{'1. Yes': 0, '2. No': 1})
return data
def solve(clf, x_train, y_train, x_test, y_test):
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
return f1_score(y_test, y_pred, average='micro')
def grid_search(clf, x, y, cv):
params = {'reduce_dim__n_components': [5, 6, 7],
'classify__max_depth': [3, 4, 5, 6, 7, 8, 9],
'classify__min_samples_split': np.linspace(1e-4, 1e-1, 4),
'classify__min_samples_leaf': np.linspace(1e-4, 1e-1, 4),
'classify__subsample': np.linspace(0.6, 1.0, 5),
'classify__max_features': np.linspace(0.1, 1.0, 5)}
gs = GridSearchCV(estimator=clf, param_grid=params,
scoring='f1_micro', cv=cv, n_jobs=2, verbose=1)
gs.fit(x, y)
print(gs.best_params_)
print(gs.best_score_)
def cross_validate(clf, x, y, cv):
i = 0
mean_score = 0
for train_index, test_index in cv.split(x, y):
i += 1
x_train, y_train = x[train_index], y[train_index]
x_test, y_test = x[test_index], y[test_index]
score = solve(clf, x_train, y_train, x_test, y_test)
if not GRIDSEARCH:
print('(iter {0}) score={1}'.format(i, score))
mean_score += score
mean_score /= i
print(mean_score)
return mean_score
def main():
    train = pd.read_csv(sys.argv[1])
"""Functions to generate metafeatures using heuristics."""
import re
import numpy as np
import pandas as pd
from pandas.api import types
def _raise_if_not_pd_series(obj):
if not isinstance(obj, pd.Series):
raise TypeError(
f"Expecting `pd.Series type as input, instead of {type(obj)} type."
)
def _safe_div(num, denom):
EPSILON = 1e-8
return num / (denom + EPSILON)
def convert_to_numeric(series: pd.Series) -> pd.Series:
"""Retain and convert any numeric data points."""
return pd.to_numeric(series.copy(), errors="coerce").dropna()
def is_number_as_string(
series: pd.Series, shrinkage_threshold: float = 0.7
) -> bool:
"""
Check if string can be numerical.
    Remove non-digit characters from each string and measure the fraction of length retained.
    shrinkage_threshold:
        Minimum fraction of a value's length that must survive removal of non-digit
        characters for the value to count as numeric-like.
Returns:
True if at least half of the values' relative post-shrinkage length is
at least `shrinkage_threshold`, and there is at least one value remaining
after numerical conversion.
"""
series = series.copy().astype(str)
nums_removed = series.apply(lambda x: re.sub(r"\D", "", x))
rel_post_shrinkage_len = _safe_div(
nums_removed.apply(len), series.apply(len)
)
most_values_contain_numbers = (
_safe_div(
(rel_post_shrinkage_len > shrinkage_threshold).sum(),
len(rel_post_shrinkage_len),
)
>= 0.5
)
at_least_one_value_remaining = bool(len(convert_to_numeric(series)))
return most_values_contain_numbers and at_least_one_value_remaining
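# Illustrative check of the heuristic on invented strings: phone-like values are mostly
# digits and pass, free text does not.
def _demo_is_number_as_string():
    phone_like = pd.Series(["(555) 123-4567", "555-987-6543", "5551112222"])
    words = pd.Series(["apple", "banana", "cherry"])
    return is_number_as_string(phone_like), is_number_as_string(words)  # (True, False)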
def castable_as_numeric(series: pd.Series, threshold: float = 0.95) -> bool:
"""
Check if series values can be casted as numeric dtypes.
Returns:
True if at least `threshold` values can be casted as numerics.
"""
# Columns which are already of numeric dtype are considered not castable
if series.dtype in ["float", "int"]:
return False
return _safe_div(len(convert_to_numeric(series)), len(series)) >= threshold
def numeric_extractable(series: pd.Series, threshold: float = 0.95) -> bool:
"""
Check if numbers can be extracted from series values.
Returns:
True if at least `threshold` values contain numerics.
"""
# Columns which are already of numeric dtype are considered not extractable
if series.dtype in ["float", "int"]:
return False
series = series.copy().dropna().astype(str)
n_contains_digits = series.apply(
lambda x: any(char.isdigit() for char in x)
).sum()
return _safe_div(n_contains_digits, len(series)) >= threshold
def normalized_distinct_rate(df: pd.DataFrame) -> pd.Series:
"""
Calculate the % of distinct values relative to the number of non-null entries.
Arguments:
df {pd.DataFrame} -- Dataframe to analzye.
Returns:
pd.Series -- Normalized distinct rate.
"""
return _safe_div(df["num_distincts"], df["total_val"] - df["num_nans"])
def nan_rate(df: pd.DataFrame) -> pd.Series:
"""
Calculate the % of NaNs relative to the total number of data points.
Arguments:
df {pd.DataFrame} -- Dataframe to analyze.
Returns:
pd.Series -- NaN rate.
"""
return _safe_div(df["num_nans"], df["total_val"])
def avg_val_len(raw: pd.DataFrame) -> pd.Series:
"""
Get the average length values in the feature column.
Returns -1 if feature column is completely empty.
Arguments:
raw {pd.DataFrame} -- Raw dataframe to analyze.
Returns:
pd.Series -- Average length of elements in feature column
"""
result = []
for col in raw:
series = raw[col].dropna()
if not len(series):
result.append(-1)
continue
result.append(_safe_div(sum(len(str(x)) for x in series), len(series)))
return pd.Series(result, index=raw.columns)
def stddev_val_len(raw: pd.DataFrame) -> pd.Series:
"""
Get the standard deviation of length values in the feature column.
Returns -1 if feature column is completely empty.
Arguments:
raw {pd.DataFrame} -- Raw dataframe to analyze.
Returns:
pd.Series -- Standard deviation length of elements in feature column
"""
result = []
for col in raw:
series = raw[col].dropna()
if not len(series):
result.append(-1)
continue
result.append(np.std([len(str(x)) for x in series]))
return pd.Series(result, index=raw.columns)
def maybe_zipcode(raw: pd.DataFrame, threshold: float = 0.95) -> pd.Series:
"""
Infer if DataFrame might be a zipcode.
The three decision criteria are:
1. 'zip' appears in the name
2. At least `threshold` values look like US zipcodes (5 digits).
3. At least `threshold` values look like Canadian zipcodes (*#* #*#).
Arguments:
        raw {pd.DataFrame} -- Raw dataframe to analyze.
Keyword Arguments:
threshold {float} -- Minimum value for criterion to be considered met. (default: {0.95})
Returns:
pd.Series[int] -- Scores for each series in dataframe.
A point is given for each criterion met.
"""
return raw.apply(_maybe_zipcode)
def _maybe_zipcode(raw_s: pd.Series, threshold: float = 0.95) -> int:
"""
Infer if series might be a zipcode.
The three decision criteria are:
1. 'zip' appears in the name
2. At least `threshold` values look like US zipcodes (5 digits).
3. At least `threshold` values look like Canadian zipcodes (*#* #*#).
Arguments:
raw_s {pd.Series} -- Raw pd.Series to analyze.
Keyword Arguments:
threshold {float} -- Minimum value for criterion to be considered met. (default: {0.95})
Returns:
int -- Score. A point is given for each criterion met.
"""
_raise_if_not_pd_series(raw_s)
points = 0
# Criterion 1
if "zip" in str(raw_s.name):
points += 1
# Criterion 2
at_least_5_digits = raw_s.apply(
lambda x: len(str(x)) == 5 and str(x).isnumeric()
)
if _safe_div(at_least_5_digits.sum(), len(raw_s)) >= threshold:
points += 1
# Criterion 3
is_cad_zip = raw_s.apply(
lambda x: bool(re.search(r"\w\d\w\s?\d\w\d", str(x)))
)
if _safe_div(is_cad_zip.sum(), len(raw_s)) >= threshold:
points += 1
return points
def maybe_real_as_categorical(
df: pd.DataFrame, max_n_distinct: int = 20
) -> pd.Series:
"""
Evaluate if feature column might be categorical.
Check that values are numeric and at most `max_n_distinct` distinct values.
Arguments:
df {pd.DataFrame} -- Metafeatures.
Keyword Arguments:
max_n_distinct {int} -- Maximum number of default categories. (default: {20})
Returns:
        pd.Series -- A boolean series indicating whether each feature column might be categorical.
"""
# Pick out sample columns, while ignoring other metafeatures including `samples_set`
samples = df[
[col for col in df.columns if "sample" in col and "samples" not in col]
]
is_numeric = []
for row in samples.itertuples(False):
coerced_numeric = pd.Series(
pd.to_numeric(row, errors="coerce")
).dropna()
if len(coerced_numeric) < samples.shape[1]:
is_numeric.append(False)
else:
is_numeric.append(types.is_numeric_dtype(coerced_numeric))
limited_distinct_values = df["num_distincts"] <= max_n_distinct
return is_numeric & limited_distinct_values
def has_zero_in_leading_decimals(df: pd.DataFrame) -> pd.Series:
"""
Check if each column in dataframe contains leading zeros.
This is an indicator that the column may be better coerced into an int dtype.
Arguments:
df {pd.DataFrame} -- DataFrame
Returns:
pd.Series -- Series of booleans.
"""
def func(x):
return not np.around((10 * np.remainder(x, 1))).astype(int).any()
return pd.Series(
[
            func(df[col].values) if types.is_float_dtype(df[col]) else False
            for col in df.columns
        ],
        index=df.columns,
    )
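

# A small, hypothetical usage example (not part of the original module): build a
# toy metafeature frame and apply a few of the heuristics above. Column names and
# values are illustrative only.
if __name__ == "__main__":
    raw = pd.DataFrame({
        "zip_code": ["20850", "10001", "94105"],
        "price": ["12.5", "7.0", "not available"],
    })
    meta = pd.DataFrame({
        "num_distincts": raw.nunique(),
        "num_nans": raw.isna().sum(),
        "total_val": len(raw),
    })
    print(normalized_distinct_rate(meta))     # distinct values per non-null entry
    print(nan_rate(meta))                     # fraction of NaNs per column
    print(avg_val_len(raw))                   # average string length per column
    print(maybe_zipcode(raw))                 # zipcode-likeness score per column
    print(castable_as_numeric(raw["price"]))  # False: only 2 of 3 values are numeric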
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# to save report:
# clone the following repo: https://github.com/ihuston/jupyter-hide-code-html
# run in terminal: jupyter nbconvert --to html --template jupyter-hide-code-html/clean_output.tpl path/to/CGR_16S_Microbiome_QC_Report.ipynb
# name the above file NP###_pipeline_run_folder_QC_report.html and place it in the directory with the pipeline output
# for version control:
# Kernel > Restart & Clear Output
# run in terminal: jupyter nbconvert --to script CGR_16S_Microbiome_QC_Report.ipynb
# add/commit CGR_16S_Microbiome_QC_Report.ipynb AND CGR_16S_Microbiome_QC_Report.py to git
# # CGR 16S Microbiome QC Report
# <!-- <div id="toc_container"> -->
# <h2>Table of Contents</h2>
# <ul class="toc_list">
# <a href="#1 General-analysis-information">1 General analysis information</a><br>
# <ul>
# <a href="#1.1 Project-directory">1.1 Project directory</a><br>
# <a href="#1.2 Project-directory-contents">1.2 Project directory contents</a><br>
# <a href="#1.3 Parameters">1.3 Parameters</a><br>
# <a href="#1.4 Dependency-versions">1.4 Dependency versions</a><br>
# </ul>
# <a href="#2 Samples-included-in-the-project">2 Samples included in the project</a><br>
# <a href="#3 QC-checks">3 QC checks</a><br>
# <ul>
# <a href="#3.1 Read-trimming">3.1 Read trimming</a><br>
# <a href="#3.2 Proportion-of-non-bacterial-reads">3.2 Proportion of non-bacterial reads</a><br>
# <ul>
# <a href="#3.2.1 Proportion-of-non-bacterial-reads-per-sample-type">3.2.1 Proportion of non-bacterial reads per sample type</a><br>
# </ul>
# <a href="#3.3 Sequencing-depth-distribution-per-flow-cell">3.3 Sequencing depth distribution per flow cell</a><br>
# <a href="#3.4 Read-counts-after-filtering-in-blanks-vs.-study-samples">3.4 Read counts after filtering in blanks vs. study samples</a><br>
# <a href="#3.5 Sequential-sample--and-feature-based-filters">3.5 Sequential sample- and feature-based filters</a><br>
# <a href="#3.6 Biological-replicates">3.6 Biological replicates</a><br>
# <a href="#3.7 QC-samples">3.7 QC samples</a><br>
# </ul>
# <a href="#4 Rarefaction-threshold">4 Rarefaction threshold</a><br>
# <a href="#5 Alpha-diversity">5 Alpha diversity</a><br>
# <a href="#6 Beta-diversity">6 Beta diversity</a><br>
# <ul>
# <a href="#6.1 Bray-Curtis">6.1 Bray-Curtis</a><br>
# <a href="#6.2 Jaccard">6.2 Jaccard</a><br>
# <a href="#6.3 Weighted-UniFrac">6.3 Weighted UniFrac</a><br>
# <a href="#6.4 Unweighted-UniFrac">6.4 Unweighted UniFrac</a><br>
# </ul>
# </ul>
# In[ ]:
# allow user definition of column headers for certain things, eg sample type?
# <h2 id="1 General-analysis-information">1 General analysis information</h2>
# <h3 id="1.1 Project-directory">1.1 Project directory</h3>
# All production microbiome projects are located in `/DCEG/Projects/Microbiome/Analysis/`. There is a parent folder named with the project ID; that folder contains the [bioinformatic pipeline](https://github.com/NCI-CGR/QIIME_pipeline) runs for that project and a `readme` summarizing the changes between each run.
#
# - The initial run (always named `<datestamp>_initial_run`) is used for some QC checks and to evaluate parameter settings.
# - The second run implements additional read trimming and excludes water blanks, no-template controls, and QC samples (e.g. robogut or artificial colony samples). (NOTE: pick one of intentional dups?)
# - Additional runs are performed for study-specific reasons which are summarized in the `readme`.
# <br><br>
#
# __The project and pipeline run described in this report is located here:__
# In[ ]:
proj_dir='/DCEG/Projects/Microbiome/Analysis/Project_NP0453_MB2_and_3/20201020_dev_test'
ref_db='silva-132-99-515-806-nb-classifier'
# In[ ]:
get_ipython().run_line_magic('cd', '{proj_dir}')
# The contents of the `readme`, at the time of report generation:
# In[ ]:
get_ipython().system('cat ../README')
# <h3 id="1.2 Project-directory-contents">1.2 Project directory contents</h3>
# In[ ]:
get_ipython().system('ls')
# <h3 id="1.3 Parameters">1.3 Parameters</h3>
# In[ ]:
get_ipython().system('cat *.y[a]*ml')
# <h3 id="1.4 Dependency-versions">1.4 Dependency versions</h3>
# In[ ]:
get_ipython().system('cat $(ls -t Q2_wrapper.sh.o* | head -n1)')
# <h2 id="2 Samples-included-in-the-project">2 Samples included in the project</h2>
# The tables below show the count of samples grouped by metadata provided in the manifest.
# In[ ]:
from IPython.display import display
import os.path
get_ipython().run_line_magic('matplotlib', 'inline')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import glob
from skbio.stats.ordination import pcoa
from skbio import DistanceMatrix
sns.set(style="whitegrid")
# In[ ]:
manifest = pd.read_csv(glob.glob('*.txt')[0],sep='\t',index_col=0)
manifest.columns = map(str.lower, manifest.columns)
manifest = manifest.dropna(how='all', axis='columns')
manifest.columns = manifest.columns.str.replace(' ', '') # remove once cleaning is implemented in the pipeline
# In[ ]:
if len(manifest['run-id'].astype(str).str.split('_',n=2,expand=True).columns) > 1:
manifest['Sequencer'] = (manifest['run-id'].astype(str).str.split('_',n=2,expand=True))[1]
else:
print("Can not infer sequencer ID from run ID.")
if 'sourcepcrplate' in manifest.columns:
manifest['PCR_plate'] = (manifest['sourcepcrplate'].str.split('_',n=1,expand=True))[0]
else:
print("Source PCR Plate column not detected in manifest.")
# should probably save this file, or even better, include in original manifest prior to analysis....
# In[ ]:
m = manifest.drop(columns=['externalid','sourcepcrplate','project-id','extractionbatchid','fq1','fq2'],errors='ignore')
# when do we want to drop extraction ID? in this case, it's all unique values for QC samples and NaNs for study samples
# possibly look for (# unique values == # non-nan values) instead of always dropping
for i in m.columns:
display(m[i].value_counts().rename_axis(i).to_frame('Number of samples'))
# <h2 id="3 QC-checks">3 QC checks</h2>
# <h3 id="3.1 Read-trimming">3.1 Read trimming</h3>
# The trimming parameters for the initial pipeline run (`<datestamp>_initial_run`) are set to 0 (no trimming). For subsequent runs, trimming parameters are set based on the read quality plots (not shown here; please browse `import_and_demultiplex/<runID>.qzv` using [QIIME's viewer](https://view.qiime2.org/) for quality plots). For this run, trimming parameters (also found in the config) are as follows:
# In[ ]:
get_ipython().system('grep -A4 "dada2_denoise" *.y[a]*ml')
# <h3 id="3.2 Proportion-of-non-bacterial-reads">3.2 Proportion of non-bacterial reads</h3>
# After error correction, chimera removal, removal of phiX sequences, and the four-step filtering defined above, the remaining reads are used for taxonomic classification. We are performing classification with a naive Bayes classifier trained on the SILVA 99% OTUs database that includes only the V4 region (defined by the 515F/806R primer pair). This data is located at `taxonomic_classification/silva-132-99-515-806-nb-classifier/barplots.qzv`. Please use [QIIME's viewer](https://view.qiime2.org/) for a more detailed interactive plot.
#
# The plots below show the "level 1" taxonomic classification. The first set of plots show relative abundances; the second show absolute. Plots are split into sets of ~500 samples per plot.
#
# Note that reads are being classified using a database of predominantly bacterial sequences, so human reads, for example, will generally be in the "Unclassified" category rather than "Eukaryota." Non-bacterial reads can indicate host (human) or other contamination.
# In[ ]:
get_ipython().system('unzip -q -d taxonomic_classification/rpt_silva taxonomic_classification/{ref_db}/barplots.qzv')
# In[ ]:
f = glob.glob('taxonomic_classification/rpt_silva/*/data/level-1.csv')
df_l1 = pd.read_csv(f[0])
df_l1 = df_l1.rename(columns = {'index':'Sample'})
df_l1 = df_l1.set_index('Sample')
df_l1 = df_l1.select_dtypes(['number']).dropna(axis=1, how='all')
df_l1_rel = df_l1.div(df_l1.sum(axis=1), axis=0) * 100
# In[ ]:
def split_df(df, max_rows = 500):
split_dfs = list()
rows = df.shape[0]
n = rows % max_rows
last_rows = True
for i in range(0, rows, max_rows):
# if the last remainder of the rows is less than half the max value,
# just combine it with the second-to-last plot
# otherwise it looks weird
if i in range(rows-max_rows*2,rows-max_rows) and n <= (max_rows // 2):
split_dfs.append(df.iloc[i:i+max_rows+n])
last_rows = False
elif last_rows:
split_dfs.append(df.iloc[i:i+max_rows])
return split_dfs
# need to split very large datasets so rendering doesn't get weird
# In[ ]:
df_list = split_df(df_l1)
df_rel_list = split_df(df_l1_rel)
# In[ ]:
for i in df_rel_list:
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Relative frequency (%)',fontsize=52)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
plt.show()
# In[ ]:
for i in df_list:
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Absolute frequency',fontsize=52)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
plt.show()
# <h4 id="3.2.1 Proportion-of-non-bacterial-reads-per-sample-type">3.2.1 Proportion of non-bacterial reads per sample type</h4>
# This section highlights non-bacterial reads in various sub-populations included in the study (e.g. study samples, robogut or artificial control samples, and blanks). This can be helpful with troubleshooting if some samples unexpectedly have a high proportion of non-bacterial reads.
# In[ ]:
def plot_level_1_subpops(samples,pop):
plt.rcParams["xtick.labelsize"] = 12
n = -0.5
r = 90
ha = "center"
f = 12
if len(samples) < 30:
plt.rcParams["xtick.labelsize"] = 40
n = -0.8
r = 40
ha = "right"
f = 40
df = df_l1_rel[df_l1_rel.index.isin(samples)]
for i in split_df(df):
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, n),ncol=4,fontsize=52)
ax.set_ylabel('Relative frequency (%)',fontsize=52)
ax.set_xlabel('Sample',fontsize=f)
ax.set_title('Taxonomic classification, level 1, ' + pop + ' samples only',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size = 40)
ax.set_xticklabels(ax.get_xticklabels(), rotation=r, ha=ha)
plt.show()
# In[ ]:
if 'sampletype' in manifest.columns:
for i in manifest['sampletype'].unique():
l = list(manifest[manifest['sampletype'].str.match(i)].index)
plot_level_1_subpops(l,i)
else:
print("No Sample Type column detected in manifest.")
# ## Non-bacterial read removal
# Best practices indicate we should filter these reads regardless of the degree to which we observe them. The plots below show the "level 1" classification after removal of non-bacterial reads and reads without a phylum classification.
#
# This data is located at `taxonomic_classification_bacteria_only/silva-132-99-515-806-nb-classifier/barplots.qzv`. Please use [QIIME's viewer](https://view.qiime2.org/) for a more detailed interactive plot.
# In[ ]:
get_ipython().system('unzip -q -d taxonomic_classification_bacteria_only/rpt_silva taxonomic_classification_bacteria_only/{ref_db}/barplots.qzv')
# In[ ]:
f = glob.glob('taxonomic_classification_bacteria_only/rpt_silva/*/data/level-1.csv')
df_l1b = pd.read_csv(f[0])
df_l1b = df_l1b.rename(columns = {'index':'Sample'})
df_l1b = df_l1b.set_index('Sample')
df_l1b = df_l1b.select_dtypes(['number']).dropna(axis=1, how='all')
df_l1b_rel = df_l1b.div(df_l1b.sum(axis=1), axis=0) * 100
# In[ ]:
for i in split_df(df_l1b_rel):
plt.figure(dpi=200)
plt.rcParams["xtick.labelsize"] = 12
pal = sns.color_palette("Accent")
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Relative frequency (%)',fontsize=52)
ax.set_xlabel('Sample',fontsize=12)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
plt.show()
# In[ ]:
for i in split_df(df_l1b):
plt.figure(dpi=200)
pal = sns.color_palette("Accent")
plt.rcParams["xtick.labelsize"] = 12
ax = i.sort_values('D_0__Bacteria').plot.bar(stacked=True, color=pal, figsize=(60,7), width=1, edgecolor='white', ax=plt.gca())
ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.5),ncol=4,fontsize=52)
ax.set_ylabel('Absolute frequency',fontsize=52)
ax.set_xlabel('Sample',fontsize=12)
ax.set_title('Taxonomic classification, level 1',fontsize=52)
ax.set_yticklabels(ax.get_yticks(), size=40)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90, ha="center", size=12)
plt.show()
# <h3 id="3.3 Sequencing-depth-distribution-per-flow-cell">3.3 Sequencing depth distribution per flow cell</h3>
# Per-sample read depths are recorded in `import_and_demultiplex/<runID>.qzv`. Those values are plotted below, excluding NTC and water blanks. Distributions per flow cell should be similar if the flow cells contained the same number of non-blank samples. If a flow cell contains fewer samples, each sample will have a greater number of reads, so that the total number of reads produced per flow cell remains approximately the same.
# In[ ]:
get_ipython().run_cell_magic('bash', '', 'cd import_and_demultiplex\nfor i in *qzv; do unzip -q $i -d "rpt_${i%.*}"; done\nfor i in rpt_*/*/data/per-sample-fastq-counts.csv; do j=${i%%/*}; k=${j#"rpt_"}; awk -v var="$k" \'BEGIN{FS=",";OFS="\\t"}$1!~/Sample name/{print $1,$2,var}\' $i >> t; done\ncat <(echo -e "Sample_name\\tSequence_count\\tRun_ID") t > rpt_vertical_per-sample-fastq-counts.csv\nrm t\ncd ..')
# In[ ]:
df_depth = pd.read_csv('import_and_demultiplex/rpt_vertical_per-sample-fastq-counts.csv',sep='\t')
search_values = ['Water','NTC']
df_depth_no_blanks = df_depth[~df_depth.Sample_name.str.contains('|'.join(search_values ),case=False)]
plt.figure(dpi=100)
sns.set(style="whitegrid")
ax = sns.boxplot(x="Run_ID",y="Sequence_count",data=df_depth_no_blanks)
ax.set_xticklabels(ax.get_xticklabels(),rotation=40,ha="right")#,fontsize=8)
ax.axes.set_title("Sequencing depth distribution per flow cell",fontsize=12)
# ax.tick_params(labelsize=8)
plt.show()
# <h3 id="3.4 Read-counts-after-filtering-in-blanks-vs.-study-samples">3.4 Read counts after filtering in blanks vs. study samples</h3>
# Per-sample read depths at each filtering step are recorded in `denoising/stats/<runID>.qzv`. The plots below show the mean for each category; error bars indicate the 95% confidence interval.
#
# NTC blanks are expected to have near-zero read depths, and represent false positives introduced by sequencing reagents.
#
# Water blanks are expected to have read depths that are at least one to two orders of magnitude lower than the average study sample depth. They represent the relatively low level of taxa that may be detected in the water used in the lab.
# In[ ]:
get_ipython().run_cell_magic('bash', '', 'cd denoising/stats/\nfor i in *qzv; do unzip -q $i -d "rpt_${i%.*}"; done\nfor i in rpt_*/*/data/metadata.tsv; do dos2unix -q $i; j=${i%%/*}; k=${j#"rpt_"}; awk -v var="$k" \'BEGIN{FS=OFS="\\t"}NR>2{print $0,var}\' $i >> t; done\ncat <(echo -e "sample-id\\tinput\\tfiltered\\tdenoised\\tmerged\\tnon-chimeric\\tflow_cell") t > rpt_denoising_stats.tsv\nrm t\ncd ../..')
# In[ ]:
df_stats = pd.read_csv('denoising/stats/rpt_denoising_stats.tsv',sep='\t')
df_stats = df_stats.set_index('sample-id')
# In[ ]:
def plot_read_counts(samples,pop):
plt.figure(dpi=100)
sns.set(style="whitegrid")
ax = sns.barplot(data=df_stats[df_stats.index.isin(samples)]).set_title('Number of reads in ' + pop + ' samples')
plt.show()
# In[ ]:
if 'sampletype' in manifest.columns:
for i in manifest['sampletype'].unique():
l = list(manifest[manifest['sampletype'].str.match(i)].index)
plot_read_counts(l,i)
else:
print("No Sample Type column detected in manifest.")
# The table below shows the 30 samples with the lowest non-chimeric read counts. This information may be helpful in identifying problematic samples and determining a minimum read threshold for sample inclusion. Note that low-depth study samples will be excluded from diversity analysis based on the sampling depth threshold selected (discussed in the following section).
# In[ ]:
if 'externalid' in manifest.columns:
display(df_stats.join(manifest[['externalid']])[['externalid','input','filtered','denoised','merged','non-chimeric']].sort_values(['non-chimeric']).head(30))
else:
display(df_stats[['input','filtered','denoised','merged','non-chimeric']].sort_values(['non-chimeric']).head(30))
# <h3 id="3.5 Sequential-sample--and-feature-based-filters">3.5 Sequential sample- and feature-based filters</h3>
# We remove samples and features based on the parameters defined in the config. For this run, filtering parameters are as follows:
# In[ ]:
get_ipython().system('grep "min_num_" *.y[a]*ml')
# Four sequential filtering steps are applied as follows:
# 1. Remove any samples with reads below the defined threshold
# 2. Remove any features with reads below the defined threshold
# 3. Remove any features that occur in fewer samples than the defined threshold
# 4. Remove any samples that contain fewer features than the defined threshold
#
# Filtering is propagated through to sequence tables as well.
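# A minimal pandas sketch of the four sequential filters (illustration only; the
# pipeline itself performs these steps in QIIME 2). The threshold argument names
# mirror the "min_num_*" config keys above, and `ft` is assumed to be a feature
# table with features as rows and samples as columns.
def apply_sequential_filters(ft, min_reads_per_sample, min_reads_per_feature,
                             min_samples_per_feature, min_features_per_sample):
    ft = ft.loc[:, ft.sum(axis=0) >= min_reads_per_sample]           # 1. drop low-read samples
    ft = ft.loc[ft.sum(axis=1) >= min_reads_per_feature]             # 2. drop low-read features
    ft = ft.loc[(ft > 0).sum(axis=1) >= min_samples_per_feature]     # 3. drop features seen in few samples
    ft = ft.loc[:, (ft > 0).sum(axis=0) >= min_features_per_sample]  # 4. drop samples with few features
    return ft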
#
# For this run, filtering resulted in the following counts:
# In[ ]:
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_1 read_feature_and_sample_filtering/feature_tables/1_remove_samples_with_low_read_count.qzv')
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_2 read_feature_and_sample_filtering/feature_tables/2_remove_features_with_low_read_count.qzv')
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_3 read_feature_and_sample_filtering/feature_tables/3_remove_features_with_low_sample_count.qzv')
get_ipython().system('unzip -q -d read_feature_and_sample_filtering/feature_tables/rpt_4 read_feature_and_sample_filtering/feature_tables/4_remove_samples_with_low_feature_count.qzv')
# In[ ]:
get_ipython().system('echo "Feature counts:"')
get_ipython().system('echo "no_filtering" $(grep -cv "^#" denoising/feature_tables/feature-table.from_biom.txt)')
get_ipython().system('echo "remove_samples_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_1/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_2/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_sample_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_3/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_samples_with_low_feature_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_4/*/data/feature-frequency-detail.csv | cut -d\' \' -f1)')
# In[ ]:
get_ipython().system('echo "Sample counts:"')
get_ipython().system('echo "no_filtering" $(grep -m1 "^#OTU" denoising/feature_tables/feature-table.from_biom.txt | tr "\\t" "\\n" | grep -cv "^#")')
get_ipython().system('echo "remove_samples_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_1/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_read_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_2/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_features_with_low_sample_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_3/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
get_ipython().system('echo "remove_samples_with_low_feature_count" $(wc -l read_feature_and_sample_filtering/feature_tables/rpt_4/*/data/sample-frequency-detail.csv | cut -d\' \' -f1)')
# <h3 id="3.6 Biological-replicates">3.6 Biological replicates</h3>
# Paired duplicates, for the purposes of this pipeline, are defined by an identical "ExternalID." The taxonomic classification (using the SILVA 99% OTUs database) at levels 2 through 7 are compared across each pair and evaluated using cosine similarity. The closer the cosine similarity value is to 1, the more similar the vectors are. Note that this comparison uses the taxonomic classification prior to removal of non-bacterial reads.
# In[ ]:
manifest_no_blanks = manifest[~manifest.index.str.contains('|'.join(['Water','NTC']),case=False)]
if 'externalid' in manifest_no_blanks.columns:
dup1_sample = list(manifest_no_blanks[manifest_no_blanks.duplicated(subset='externalid', keep='first')].sort_values('externalid').index)
dup2_sample = list(manifest_no_blanks[manifest_no_blanks.duplicated(subset='externalid', keep='last')].sort_values('externalid').index)
l = dup1_sample + dup2_sample
else:
print("No External ID column detected in manifest.")
# In[ ]:
def compare_replicates(f,l):
df = pd.read_csv(f[0])
df = df.rename(columns = {'index':'Sample'})
df = df.set_index('Sample')
df_dups = df[df.index.isin(l)]
df_dups = df_dups.select_dtypes(['number']).dropna(axis=1, how='all')
return df_dups
# In[ ]:
from scipy.spatial.distance import cosine
# In[ ]:
ids_list = []
if 'externalid' in manifest_no_blanks.columns:
for a, b in zip(dup1_sample, dup2_sample):
ids = [manifest.loc[a,'externalid'], a, b]
ids_list.append(ids)
    df_cosine = pd.DataFrame(ids_list, columns=['externalid', 'replicate_1', 'replicate_2'])
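    # The rest of this cell is missing from this fragment. A hedged sketch of how the
    # per-level cosine similarities could be filled in using compare_replicates()
    # above (the 'cosine_level_N' column names are assumptions):
    for level in range(2, 8):
        f = glob.glob('taxonomic_classification/rpt_silva/*/data/level-' + str(level) + '.csv')
        df_dups = compare_replicates(f, l)
        df_cosine['cosine_level_' + str(level)] = [
            1 - cosine(df_dups.loc[r1], df_dups.loc[r2])
            for r1, r2 in zip(df_cosine['replicate_1'], df_cosine['replicate_2'])
        ]
    display(df_cosine)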
import pandas as pd
import scipy.stats as st


def three_way_ANOVA(df_list):
f3_len = len(df_list)
f1_len, f2_len = len(df_list[0].columns), len(df_list[0].index)
    # Compute the effect of each factor
f1_mean = sum([df.mean(axis=1) for df in df_list]) / f3_len
f2_mean = sum([df.mean() for df in df_list]) / f3_len
f3_mean = pd.Series([df.mean().mean() for df in df_list])
f_mean = sum([df.mean().mean() for df in df_list]) / f3_len
f1_effect, f2_effect, f3_effect = f1_mean - f_mean, f2_mean - f_mean, f3_mean - f_mean
    # Compute the factor sums of squares S1, S2, S3
S1 = ((f1_effect**2) * (f1_len*f3_len)).sum()
S2 = ((f2_effect**2) * (f2_len*f3_len)).sum()
S3 = ((f3_effect**2) * (f1_len*f2_len)).sum()
    # Compute the interaction sum of squares S12 between factors 1 and 2
df_12 = (sum(df_list) / f3_len) - f_mean
S1_2 = (df_12**2).sum().sum() * f3_len
S12 = S1_2 - S1 - S2
    # Compute the interaction sum of squares S13 between factors 1 and 3
df_13 = pd.DataFrame([df.mean(axis=1) for df in df_list]) - f_mean
S1_3 = (df_13**2).sum().sum() * f1_len
S13 = S1_3 - S1 - S3
    # Compute the interaction sum of squares S23 between factors 2 and 3
df_23 = pd.DataFrame([df.mean() for df in df_list]) - f_mean
S2_3 = (df_23**2).sum().sum() * f2_len
S23 = S2_3 - S2 - S3
    # Compute the error sum of squares Se
St = sum([((df-f_mean)**2).sum().sum() for df in df_list])
Se = St - S1 - S2 - S3 - S12 - S13 - S23
    # Compute the degrees of freedom
df1 = f2_len - 1
df2 = f1_len - 1
df3 = f3_len - 1
df12 = df1 * df2
df13 = df1 * df3
df23 = df2 * df3
dfe = df1 * df2 * df3
dft = df1 + df2 + df3 + df12 + df13 + df23 + dfe
    # Compute the mean squares (unbiased variances) V
V1 = S1 / df1
V2 = S2 / df2
V3 = S3 / df3
V12 = S12 / df12
V13 = S13 / df13
V23 = S23 / df23
Ve = Se / dfe
    # Compute the F statistics
F1 = V1 / Ve
F2 = V2 / Ve
F3 = V3 / Ve
F12 = V12 / Ve
F13 = V13 / Ve
F23 = V23 / Ve
    # Compute the p-values
p1 = 1 - st.f.cdf(F1, dfn=df1, dfd=dfe)
p2 = 1 - st.f.cdf(F2, dfn=df2, dfd=dfe)
p3 = 1 - st.f.cdf(F3, dfn=df3, dfd=dfe)
p12 = 1 - st.f.cdf(F12, dfn=df12, dfd=dfe)
p13 = 1 - st.f.cdf(F13, dfn=df13, dfd=dfe)
p23 = 1 - st.f.cdf(F23, dfn=df23, dfd=dfe)
    # Build the ANOVA table
df_S = pd.Series([S1, S2, S3, S12, S13, S23, Se, St])
df_df = pd.Series([df1, df2, df3, df12, df13, df23, dfe, dft])
df_V = pd.Series([V1, V2, V3, V12, V13, V23, Ve])
df_F = pd.Series([F1, F2, F3, F12, F13, F23])
df_p = pd.DataFrame([p1, p2, p3, p12, p13, p23], columns=['p'])
df_p['sign'] = df_p['p'].apply(lambda x : '**' if x < 0.01 else '*' if x < 0.05 else '')
df_ANOVA = pd.concat([df_S, df_df, df_V, df_F, df_p], axis=1).set_axis(['S','df','V','F','p','sign'], axis=1).set_axis(['Indexes', 'Columns', 'Tables', 'Indexes*Columns', 'Indexes*Tables', 'Columns*Tables', 'Error', 'Total']).rename_axis('ANOVA_table', axis=1).fillna('')
    # Collect the factor effects into DataFrames
df_effect_indexes = pd.DataFrame(f1_effect).set_axis(['mean'], axis=1)
df_effect_columns = pd.DataFrame(f2_effect).set_axis(['mean'], axis=1)
    df_effect_tables = pd.DataFrame(f3_effect)
    # Return values assumed from context; the original fragment ends here.
    return df_ANOVA, df_effect_indexes, df_effect_columns, df_effect_tables
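

# Hypothetical usage example (not part of the original fragment; it relies on the
# return statement assumed above): a 2x3 design measured across two tables.
if __name__ == '__main__':
    table_a = pd.DataFrame([[4.1, 4.3, 3.9], [5.0, 5.2, 4.8]],
                           index=['low', 'high'], columns=['A', 'B', 'C'])
    table_b = pd.DataFrame([[4.4, 4.6, 4.0], [5.1, 5.4, 5.0]],
                           index=['low', 'high'], columns=['A', 'B', 'C'])
    anova_table, eff_rows, eff_cols, eff_tables = three_way_ANOVA([table_a, table_b])
    print(anova_table)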
# -*- coding: utf-8 -*-
# @Time: 2020/6/30,030 13:36
# @Last Update: 2020/6/30,030 13:36
# @Author: 徐缘
# @FileName: lightGBM.py
# @Software: PyCharm
import os
import datetime
import requests
import time
import json
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score, roc_auc_score, roc_curve, auc, accuracy_score, precision_score
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import lightgbm as lgb
import gc
from tqdm import tqdm  # progress bar
# Build the full training dataset from the raw alarm CSV files
def get_all_train_data(train_path):
all_train_data = pd.DataFrame()
ID = 0
print(len(os.listdir(train_path)))
for now_csv in tqdm(os.listdir(train_path)):
data = pd.read_csv(os.path.join(train_path, now_csv))
all_train_data = all_train_data.append(data)
all_train_data['end_time'] = '2020-03-10'
all_train_data['告警开始时间'] = pd.to_datetime(all_train_data['告警开始时间'], format='%Y-%m-%d %H:%M:%S')
all_train_data['end_time'] = pd.to_datetime(all_train_data['end_time'], format='%Y-%m-%d')
all_train_data['time_gap'] = all_train_data.apply(lambda x: (x['end_time'] - x['告警开始时间']).days, axis=1)
return all_train_data
# Generate windowed training samples
def gener_train_data(all_data, times):
"""
    i: index of the day from which extraction starts
    times: number of windows to extract
"""
print('*********')
res_data = pd.DataFrame()
all_data['end_time'] = pd.to_datetime(all_data['end_time'], format='%Y-%m-%d %H:%M:%S')
for i in tqdm(range(times)):
label_data = all_data[all_data['time_gap'] == i]
        # Build the label
label_data['label'] = label_data['告警名称'].apply(
lambda x: 1 if x.strip() == '网元连接中断' or x.strip() == '小区不可用告警' else 0)
label_data = label_data.groupby('基站名称')['label'].agg('sum').reset_index()
label_data['label'] = label_data['label'].apply(lambda x: 1 if x > 0 else 0)
        # Use the preceding 7 days of data as features
tmp_data = all_data[(all_data['time_gap'] > i) & (all_data['time_gap'] <= i + 7) &
                            (all_data['基站名称'].isin(label_data['基站名称']))]  # filter to stations present in the label set
        # Shift time so every sample shares the same 7-day window
tmp_data['time_gap'] = tmp_data['time_gap'] - i
tmp_data = tmp_data.merge(label_data, on='基站名称', how='left')
tmp_data['ID'] = tmp_data['基站名称'] + '_' + str(i)
        tmp_data['end_time'] = tmp_data['end_time'] - datetime.timedelta(days=i)  # note: end_time must be datetime here, not str
res_data = res_data.append(tmp_data)
return res_data
# Load the generated training data (cached to CSV)
def get_train_data(all_data, times=1):
save_path = './all_train_data_sample_' + str(times) + '.csv'
if not os.path.exists(save_path):
res_data = gener_train_data(all_data, times)
res_data.to_csv(save_path, index=False)
res_data = pd.read_csv(save_path)
return res_data
# Feature engineering
def gener_fea(data):
res = data[['ID', 'end_time', 'label']].drop_duplicates()
    # Build the features
    # 1. Count the number of faults per sample on each of days 1-7
tmp = data.groupby(by=['ID', 'time_gap']).agg({'基站名称': 'count'}).reset_index()
tmp = pd.pivot_table(tmp, index='ID', columns='time_gap', values='基站名称').reset_index()
    # Total number of faults within 7 days
tmp['sum_guzhang_7'] = tmp[[1, 2, 3, 4, 5, 6, 7]].apply(lambda x: x.sum(), axis=1)
tmp.rename(columns={1: 'guzhang_1', 2: 'guzhang_2', 3: 'guzhang_3', 4: 'guzhang_4', 5: 'guzhang_5', 6: 'guzhang_6',
7: 'guzhang_7'}, inplace=True)
res = res.merge(tmp, on='ID', how='left')
    # Number of distinct alarm types within 7 days
tmp = data.groupby(by=['ID']).agg({'告警名称': 'nunique'}).reset_index().rename(columns={'告警名称': '7_gaojing_nunique'})
res = res.merge(tmp, on='ID', how='left')
    # Number of distinct alarm types per day
tmp = data.groupby(by=['ID', 'time_gap']).agg({'告警名称': 'nunique'}).reset_index()
tmp = pd.pivot_table(tmp, index='ID', columns='time_gap', values='告警名称').reset_index()
tmp.rename(columns={1: 'guzhang_types_1', 2: 'guzhang_types_2', 3: 'guzhang_types_3', 4: 'guzhang_types_4',
5: 'guzhang_types_5',
6: 'guzhang_types_6', 7: 'guzhang_types_7'}, inplace=True)
res = res.merge(tmp, on='ID', how='left')
    # Number of days with at least one fault within 7 days
tmp = data.groupby(by=['ID']).agg({'time_gap': 'nunique'}).reset_index().rename(
columns={'time_gap': 'time_gap_nunique'})
res = res.merge(tmp, on='ID', how='left')
    # Average number of faults per day over the 7-day window
res['sum_guzhang_7/7'] = res['sum_guzhang_7'] / 7
    # Average faults per day, counting only days on which faults occurred
res['sum_guzhang_7/time_gap_nunique'] = res['sum_guzhang_7'] / res['time_gap_nunique']
    # Alarm counts by keyword: fault (故障), abnormal (异常), failure (失败), ...
def get_guzhang(x, gaojing_type):
res = 0
for i in x:
if i.find(gaojing_type) != -1:
res += 1
return res
    # Number of fault-type (故障) alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, '故障')).reset_index().rename(columns={'告警名称': 'guzhang_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of abnormal-type (异常) alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, '异常')).reset_index().rename(columns={'告警名称': 'yichang_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of failure-type (失败) alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, '失败')).reset_index().rename(columns={'告警名称': 'shibai_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of cell-type (小区) alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, '小区')).reset_index().rename(columns={'告警名称': 'xiaoqu_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of RF-type (射频) alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, '射频')).reset_index().rename(columns={'告警名称': 'shepin_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of BBU alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, 'BBU')).reset_index().rename(columns={'告警名称': 'BBU_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of RHUB alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, 'RHUB')).reset_index().rename(columns={'告警名称': 'RHUB_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of RRU alarms
tmp = data.groupby(by=['ID'])['告警名称'].apply(
lambda x: get_guzhang(x, 'RRU')).reset_index().rename(columns={'告警名称': 'RRU_sum'})
res = res.merge(tmp, on='ID', how='left')
    # Number of days within 7 days on which "NE connection lost" (网元连接中断) or "cell unavailable" (小区不可用告警) alarms occurred
tmp = data[(data['告警名称'] == '网元连接中断') | (data['告警名称'] == '小区不可用告警')]
tmp['start_time'] = tmp['告警开始时间'].apply(lambda x: str(x)[:10])
tmp = tmp.drop_duplicates(subset=['ID', 'start_time'], keep='first')
tmp = tmp.groupby(['ID']).agg({'start_time': 'nunique'}).reset_index().rename(columns={'start_time': 'label1_days'})
res = res.merge(tmp, on='ID', how='left')
    # Days since the most recent "NE connection lost" / "cell unavailable" alarm
tmp = data[(data['告警名称'] == '网元连接中断') | (data['告警名称'] == '小区不可用告警')]
tmp['告警开始时间'] = pd.to_datetime(tmp['告警开始时间'], format='%Y-%m-%d %H:%M:%S')
tmp['end_time'] = pd.to_datetime(tmp['end_time'], format='%Y-%m-%d %H:%M:%S')
tmp['near_label1_gaojing_gap'] = tmp.apply(lambda x: (x['end_time'] - x['告警开始时间']).days, axis=1)
tmp2 = tmp.groupby(by=['ID', '告警名称']).agg({'near_label1_gaojing_gap': 'min'}).reset_index()
tmp2 = pd.pivot_table(tmp2, index='ID', columns='告警名称', values='near_label1_gaojing_gap').reset_index().rename(
columns={'网元连接中断': 'near_wangyuan_gap', '小区不可用告警': 'near_xiaoqu_gap'})
res = res.merge(tmp2, on='ID', how='left')
    # Time gaps between consecutive "NE connection lost" / "cell unavailable" alarms
tmp3 = tmp
rename = 'label1_gap'
tmp3.sort_values(['ID', '告警开始时间'], inplace=True)
tmp3['next_gaojing_time'] = tmp3.groupby(by=['ID'])['告警开始时间'].shift(-1)
tmp3['gaojing_gaps'] = tmp3.apply(lambda x: (x['next_gaojing_time'] - x['告警开始时间']).seconds, axis=1)
tmp3 = tmp3[['ID', '告警开始时间', 'next_gaojing_time', 'gaojing_gaps']]
tmp3.dropna(subset=['gaojing_gaps'], inplace=True)
tmp3 = tmp3.groupby(by=['ID'])['gaojing_gaps'].agg(['max', 'min', 'mean', 'std', 'skew']).reset_index().rename(
columns={'max': rename + '_max', 'min': rename + '_min', 'mean': rename + '_mean', 'std': rename + '_std',
'skew': rename + '_skew'})
res = res.merge(tmp3, on='ID', how='left')
# print(tmp3)
    # Count occurrences of each alarm type within 7 days
tmp = data.groupby(by=['ID', '告警名称']).agg({'基站名称': 'count'}).reset_index()
tmp = pd.pivot_table(tmp, index='ID', columns='告警名称', values='基站名称').reset_index()
cols = {}
i = 0
for col in tmp.columns:
if col not in ['ID', 'end_time', 'label']:
cols[col] = i
i += 1
tmp.rename(columns=cols, inplace=True)
res = res.merge(tmp, on='ID', how='left')
res['0/sum7'] = res[0] / res['sum_guzhang_7']
res['1/sum7'] = res[1] / res['sum_guzhang_7']
res['3/sum7'] = res[3] / res['sum_guzhang_7']
return res
def search_threthold(true, pred):
score = 0
bestThrethold = 0
for i in np.arange(0, 1, 0.01):
if f1_score(true, np.where(pred > i, 1, 0)) > score:
score = f1_score(true, np.where(pred > i, 1, 0))
bestThrethold = i
else:
pass
return bestThrethold
def train_lgb_model(train_, valid_, valid_2, id_name, label_name, categorical_feature=None, seed=1024, is_shuffle=True):
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)
train_['res'] = 0
pred = [col for col in train_.columns if col not in [id_name, label_name, 'res']]
    print('Number of features:', len(pred))
sub_preds = np.zeros((valid_.shape[0], folds.n_splits))
sub_preds2 = np.zeros((valid_2.shape[0], folds.n_splits))
params = {
'learning_rate': 0.01,
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': ['binary_logloss', 'auc'],
'num_leaves': 32,
'feature_fraction': 0.7,
'bagging_fraction': 0.7,
'bagging_freq': 5,
'seed': 1,
# 'device': 'gpu',
'bagging_seed': 1,
'feature_fraction_seed': 7,
'min_data_in_leaf': 28,
'nthread': -1,
'verbose': -1,
}
fea_impor = pd.DataFrame()
fea_impor['column'] = train_[pred].columns
fea_impor['importance'] = 0
for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_, train_[label_name]), start=1):
print(f'the {n_fold} training start ...')
train_x, train_y = train_[pred].iloc[train_idx], train_[label_name].iloc[train_idx]
valid_x, valid_y = train_[pred].iloc[valid_idx], train_[label_name].iloc[valid_idx]
dtrain = lgb.Dataset(train_x, label=train_y)
dvalid = lgb.Dataset(valid_x, label=valid_y)
clf = lgb.train(
params=params,
train_set=dtrain,
num_boost_round=1000,
valid_sets=[dtrain, dvalid],
early_stopping_rounds=100,
# feval=fscore,
verbose_eval=100,
)
fea_impor['tmp'] = clf.feature_importance()
fea_impor['importance'] = fea_impor['importance'] + fea_impor['tmp']
sub_preds[:, n_fold - 1] = clf.predict(valid_[pred], num_iteration=clf.best_iteration)
sub_preds2[:, n_fold - 1] = clf.predict(valid_2[pred], num_iteration=clf.best_iteration)
train_pred = clf.predict(valid_x, num_iteration=clf.best_iteration)
tmp_score = roc_auc_score(valid_y, train_pred)
train_['res'].iloc[valid_idx] = train_['res'].iloc[valid_idx] + train_pred
print(f'Orange roc_auc_score score: {tmp_score}')
tmp_score = roc_auc_score(train_[label_name], train_['res'])
print(f'five flod roc_auc_score score: {tmp_score}')
train_.sort_values(by=['res'], ascending=False, inplace=True)
# 按照0.5划分
th = search_threthold(train_[label_name], train_['res'])
train_['res'] = train_['res'].apply(lambda x: 1 if x > th else 0)
tmp_f1 = f1_score(train_[label_name], train_['res'])
print(f'five flod tmp_f1 score: {th, tmp_f1}')
valid_[label_name] = np.mean(sub_preds, axis=1)
valid_2[label_name] = np.mean(sub_preds2, axis=1)
valid_['基站名称'] = valid_[id_name]
valid_2['基站名称'] = valid_2[id_name]
valid_['未来24小时发生退服类告警的概率'] = valid_[label_name]
valid_2['未来24小时发生退服类告警的概率'] = valid_2[label_name]
return th, valid_[['基站名称', '未来24小时发生退服类告警的概率']], \
valid_2[['基站名称', '未来24小时发生退服类告警的概率']]
if __name__ == '__main__':
train_path = '/mnt/5/Alert_BTS_HW_1001-0309'
# 0316-0322
test_0322_path = '/mnt/5/Alert_BTS_HW_0316-0322'
# 0324-0330
test_0330_path = '/mnt/5/Alert_BTS_HW_0324-0330'
train_path = '../data/train'
test_0322_path = '../data/test'
test_0330_path = '../data/test'
    # Build the test dataset
all_test_data = pd.DataFrame()
for now_csv in tqdm(os.listdir(test_0322_path)):
data = pd.read_csv(os.path.join(test_0322_path, now_csv))
data['end_time'] = '2020-03-23'
data['label'] = -1
data['ID'] = data['基站名称']
all_test_data = all_test_data.append(data)
for now_csv in tqdm(os.listdir(test_0330_path)):
data = pd.read_csv(os.path.join(test_0330_path, now_csv))
data['end_time'] = '2020-03-31'
data['label'] = -1
data['ID'] = data['基站名称']
all_test_data = all_test_data.append(data)
all_test_data['end_time'] = pd.to_datetime(all_test_data['end_time'], format='%Y-%m-%d')
    all_test_data['告警开始时间'] = pd.to_datetime(all_test_data['告警开始时间'], format='%Y-%m-%d %H:%M:%S')
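    # The remainder of main() is not included in this fragment. A rough, assumed
    # sketch of how the helpers above would be wired together (window alignment,
    # dropped columns and output file names are guesses, not the original code):
    all_test_data['time_gap'] = all_test_data.apply(
        lambda x: (x['end_time'] - x['告警开始时间']).days + 1, axis=1)  # align with the 1..7 training window
    all_train_data = get_all_train_data(train_path)
    train_fea = gener_fea(get_train_data(all_train_data, times=7))
    test_fea = gener_fea(all_test_data)
    drop_cols = ['end_time']
    th, sub_0322, sub_0330 = train_lgb_model(
        train_fea.drop(columns=drop_cols),
        test_fea[test_fea['end_time'] == '2020-03-23'].drop(columns=drop_cols),
        test_fea[test_fea['end_time'] == '2020-03-31'].drop(columns=drop_cols),
        id_name='ID', label_name='label')
    sub_0322.to_csv('result_0322.csv', index=False)
    sub_0330.to_csv('result_0330.csv', index=False)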
import os
# Reduce CPU load. Need to perform BEFORE import numpy and some other libraries.
os.environ['MKL_NUM_THREADS'] = '2'
os.environ['OMP_NUM_THREADS'] = '2'
os.environ['NUMEXPR_NUM_THREADS'] = '2'
import json
import numpy as np
import pandas as pd
from typing import Optional, List, Tuple, Union
from collections import OrderedDict
import requests
from tqdm import tqdm
import re
from catboost import CatBoostClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.model_selection import train_test_split
from scipy.spatial.distance import cosine, cityblock, canberra, euclidean, minkowski, braycurtis
from sklearn.metrics import log_loss
# Setup logging
import logging
logging.basicConfig(
format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
datefmt='%y-%m-%d %H:%M:%S',
level=logging.DEBUG,
)
log = logging.getLogger('agro')
RANDOM_SEED = 2021
"""
# General idea
This task is similar in spirit to Sentiment Analysis,
i.e. a text is assigned one or more classes,
for example: (positive, negative, neutral).
In this case several classes may be assigned at once (multi-label classification).
I decided that the Transformers architecture is a good fit for this purpose;
more precisely, its first half: the TransformerEncoder.
Instead of words, a sequence of embeddings is fed to the input.
That is, each word is mapped to a point in an N-dimensional space,
usually with N between 100 and 300.
To each `embedding` we add information about the word's position in the text: `PositionalEncoding`.
Several TransformerEncoder layers then process the whole sequence at once,
amplifying some blocks and attenuating others, thereby extracting the important information.
The processed sequence is then compared with certain target embeddings
that describe a particular disease.
During this comparison the whole sequence is pooled into a single embedding, one per class.
In the final step, the resulting fixed-size set of embeddings is passed through a Linear layer
to produce probabilities for each disease.
"""
"""
# Embeddings vocabulary for the Russian language
We need ready-made `embeddings` for Russian words.
Some vocabularies are available for download at
[RusVectores](https://rusvectores.org/ru/),
but they contain only 150 to 300 thousand words, which is rather small,
and their license terms are not entirely clear.
There is the ["Natasha"](https://github.com/natasha/navec) project
with a vocabulary of 500k words.
There is also another interesting project,
[DeepPavlov](https://docs.deeppavlov.ai/en/0.0.7/intro/pretrained_vectors.html),
containing about 1.5 million words.
Its license, **Apache 2.0**, allows both free and commercial use.
That is the one I will work with.
We will need to download the whole vocabulary (4.14 GB) and then load it into memory.
"""
class GloveModel():
"""
    Return a list of embeddings for a given text.
"""
Pat_Split_Text = re.compile(r"[\w']+|[.,!?;]", flags=re.RegexFlag.MULTILINE)
Unk_Tag: int = -1
Num_Tag: int = -1
def __init__(self, substitutions: Optional[str] = None, log: Optional[logging.Logger] = None):
if log is None:
log = logging.getLogger()
# Load Glove Model. Download and convert from text to .feather format (which is much faster)
glove_file_feather = 'ft_native_300_ru_wiki_lenta_lower_case.feather'
if not os.path.exists(glove_file_feather):
glove_file_vec = glove_file_feather.rsplit(os.extsep, 1)[0] + '.vec'
if not os.path.exists(glove_file_vec):
                log.info('Downloading glove model for the Russian language from DeepPavlov...')
self.download_file(
'http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_lower_case/'
'ft_native_300_ru_wiki_lenta_lower_case.vec'
)
log.info('Done')
# Load model from .vec file
log.info('Loading Glove Model from .vec format...')
self.glove = self.load_glove_model(glove_file_vec, size=300)
log.info(f'{len(self.glove)} words loaded!')
log.info('Saving Glove Model to .feather format...')
self.glove.reset_index().to_feather(glove_file_feather)
else:
log.info('Loading Glove Model from .feather format...')
self.glove = pd.read_feather(glove_file_feather)
log.info(f'{len(self.glove)} words loaded!')
log.info('Sorting glove dataframe by words...')
self.glove.sort_values('word', axis=0, ignore_index=True, inplace=True)
log.info('Done')
self.subs_tab = {}
if isinstance(substitutions, str):
for line in substitutions.splitlines():
words = line.strip().lower().split()
if len(words) < 2:
continue
self.subs_tab[words[0]] = words[1:]
log.info(f'Using the substitutions table of {len(self.subs_tab)} records')
"""
        For unknown words I will use the embedding of the word 'unk',
        and for numbers the embedding of the word 'num'.
        I am not sure this is exactly what the DeepPavlov authors intended,
        but I did not find the standard '<unk>' or '<num>' tokens there.
"""
self.Unk_Tag = int(self.glove.word.searchsorted('unk'))
self.Num_Tag = int(self.glove.word.searchsorted('num'))
assert self.glove.word[self.Unk_Tag] == 'unk', 'Failed to find "unk" token in Glove'
assert self.glove.word[self.Num_Tag] == 'num', 'Failed to find "num" token in Glove'
def __len__(self):
return len(self.glove)
def __getitem__(self, text: str) -> List[np.ndarray]:
tags = self.text2tags(text, return_offsets=False)
embeddings = [self.tag2embedding(tag) for tag in tags]
return embeddings
@staticmethod
def download_file(url: str, block_size=4096, file_name: Optional[str] = None):
"""Downloads file and saves it to local file, displays progress bar"""
with requests.get(url, stream=True) as response:
if file_name is None:
if 'Content-Disposition' in response.headers.keys():
file_name = re.findall('filename=(.+)', response.headers['Content-Disposition'])[0]
if file_name is None:
file_name = url.split('/')[-1]
expected_size_in_bytes = int(response.headers.get('content-length', 0))
received_size_in_bytes = 0
with tqdm(total=expected_size_in_bytes, unit='iB', unit_scale=True, position=0, leave=True) as pbar:
with open(file_name, 'wb') as file:
for data in response.iter_content(block_size):
file.write(data)
pbar.update(len(data))
received_size_in_bytes += len(data)
if (expected_size_in_bytes != 0) and (expected_size_in_bytes != received_size_in_bytes):
raise UserWarning(f'Incomplete download: {received_size_in_bytes} of {expected_size_in_bytes}')
@staticmethod
def load_glove_model(file_name: str, encoding: str = 'utf-8', size: Optional[int] = None) -> pd.DataFrame:
"""
Loads glove model from text file into pandas DataFrame
Returns
-------
df : pd.DataFrame
A dataframe with two columns: 'word' and 'embedding'.
The order of words is preserved as in the source file. Thus it may be unsorted!
"""
words, embeddings = [], []
with tqdm(total=os.path.getsize(file_name), unit='iB', unit_scale=True, position=0, leave=True) as pbar:
with open(file_name, 'r', encoding=encoding) as f:
first_line = True
line = f.readline()
while line:
split_line = line.split()
line = f.readline()
if first_line:
first_line = False
if len(split_line) == 2:
if size is None:
size = int(split_line[1])
else:
assert size == int(split_line[1]), \
f'Size specified at the first line: {int(split_line[1])} does not match: {size}'
continue
if size is not None:
word = ' '.join(split_line[0:-size])
embedding = np.array(split_line[-size:], dtype=np.float32)
assert len(embedding) == size, f'{line}'
else:
word = split_line[0]
embedding = np.array(split_line[1:], dtype=np.float32)
size = len(embedding)
words.append(word)
embeddings.append(embedding)
pbar.update(f.tell() - pbar.n)
return pd.DataFrame({'word': words, 'embedding': embeddings})
def word2tag(self, word: str, use_unk=True, use_num=True) -> int:
tag = self.glove.word.searchsorted(word)
if tag == len(self.glove):
return self.Unk_Tag if use_unk else -1
if self.glove.word[tag] == word:
return int(tag)
if use_num:
try:
num = float(word)
return self.Num_Tag
except ValueError:
pass
return self.Unk_Tag if use_unk else -1
def tag2embedding(self, tag: int) -> np.ndarray:
return self.glove.embedding[tag]
def word2embedding(self, word: str) -> np.ndarray:
tag = self.word2tag(word)
return self.glove.embedding[tag]
@staticmethod
def separate_number_chars(s) -> List[str]:
"""
Does what its name says.
Examples
--------
'october10' -> ['october', '10']
'123asdad' -> ['123', 'asdad']
'-12.3kg' -> ['-12.3', 'kg']
'1aaa2' -> ['1', 'aaa', '2']
"""
res = re.split(r'([-+]?\d+\.\d+)|([-+]?\d+)', s.strip())
res_f = [r.strip() for r in res if r is not None and r.strip() != '']
return res_f
def text2tags(self, text: str, return_offsets=True) -> Union[List[int], Tuple[List[int], List[int]]]:
text = text.lower()
tags = []
offsets = []
for m in self.Pat_Split_Text.finditer(text):
# Get next word and its offset in text
word = m.group(0)
offset = m.start(0)
# Current word can be converted to a list of words due to substitutions: 'Iam' -> ['I', 'am']
# or numbers and letters separations: '123kg' -> ['123', 'kg']
if word in self.subs_tab:
words = self.subs_tab[word]
else:
words = self.separate_number_chars(word)
# Get a list of tags, generated on the source word.
# Note: they all point to the same offset in the original text.
for word in words:
tags.append(self.word2tag(word))
offsets.append(offset)
if not return_offsets:
return tags
return tags, offsets
"""
# Dealing with missing words
According to the competition rules:
> It is forbidden to use manual *labeling* of the *test* data as a solution, including any labeling services.
However, it is not clearly defined what exactly counts as data *labeling*.
In any case, the ban concerns the **test** data.
Therefore, the competition rules do NOT forbid preparing a dictionary that corrects some spelling errors
and substitutes some words that are missing from the `embeddings`.
"""
SUBSTITUTIONS = """
цинксодержащие цинк содержащие
проглистогонила дала препарат от глистов
проглистогонил дал препарат от глистов
проглистовать дать препарат от глистов
проглистовали дали препарат от глистов
глистогонить дать препарат от глистов
противогельминтные против глистов
спазган обезболивающий препарат
спазгане обезболивающем препарата
спазганом обезболивающим препаратом
чемерицы рвотный препарат
чемерица рвотный препарат
чемерицей рвотным препаратом
седимин железосодерщащий препарат
левомеколь антисептической мазью
левомиколь антисептическая мазь
левомеколью антисептической мазью
левомиколью антисептической мазью
левомеколем антисептической мазью
левомиколем антисептической мазью
пребиотик пробиотик
пребеотик пробиотик
прибиотик пробиотик
прибеотик пробиотик
прибиотика пробиотик
пробиотика пробиотик
прибеотика пробиотик
пробеотика пробиотик
отел отёл
отелл отёл
оттел отёл
оттелл отёл
отелу отёлу
отеллу отёлу
оттелу отёлу
оттеллу отёлу
отёле родах
отёлл отёл
оттёл отёл
оттёлл отёл
отёллу отёлу
оттёлу отёлу
оттёллу отёлу
оттела отёла
отелла отёла
оттелла отёла
оттёла отёла
отёлла отёла
оттёлла отёла
отёлом отелом
оттелом отелом
отеллом отелом
оттеллом отелом
оттёлом отелом
отёллом отелом
оттёллом отелом
отелы отёлы
отеллы отёлы
оттелы отёлы
оттеллы отёлы
отелов отёлов
отеллов отёлов
оттелов отёлов
оттеллов отёлов
телилась рожала
отелилась родила
отёлилась родила
бурёнке корове
буренке корове
тёлке корове
тёлочке корове
тёлочка телочка
тёлочку корову
укоровы у коровы
телке корове
телки коровы
бычёк бычек
телятки телята
первотелка корова
первотелки коровы
новотельной коровы
коровушки коровы
доим дою
доишь дою
сдаиваю дою
выдаиваю дою
сдаиваем дою
выдаивем дою
додаиваю дою до конца
доились давали молоко
доется доится
выдаивании доении
сцеживал доил
сцеживала доила
доением отбором молока
сдаивание дойка
отпоил напоил
отпоила напоила
отпоили напоили
выпоить напоить
выпоили напоили
пропоить напоить
пропоили напоили
поите давайте пить
поили давали пить
свищик свищ
свищики свищи
гноящийся гнойный
выдрана вырвана
апитит аппетит
аппитит аппетит
апиттит аппетит
апетит аппетит
апеттит аппетит
опетит аппетит
оппетит аппетит
опеттит аппетит
оппеттит аппетит
опитит аппетит
зарастёт зарастет
пощаще почаще
паздбища пастбища
причинай причиной
пречинай причиной
килограм килограмм
килаграм килограмм
килаграмм килограмм
пузатенькая пузатая
абсцез абсцесс
абсцес абсцесс
абсцезс абсцесс
абсцэз абсцесс
абсцэс абсцесс
абсцэзс абсцесс
перестраховываюсь чересчур переживаю
непроходили не проходили
обкололи поставили укол
колили кололи
вколото поставлено
вкалол вколол
кольнул уколол
истыкали прокололи
накосячил ошибся
ветаптеке ветеринарной аптеке
ветаптеки ветеринарной аптеки
ветаптеку ветеринарную аптеку
житкостью жидкостью
рацеоне рационе
худющие худые
здох сдох
скаждым с каждым
четветый четвертый
ожёг ожег
поднятся подняться
захромала начала хромать
искривился стал кривым
расцарапывает царапает
вычесывает чешется
подшатываются шатаются
пошатываются шатаются
ветиринар ветеринар
ветеринат ветеринар
ветеренаров ветеринаров
ветиренаров ветеринаров
ветеренара ветеринара
ветиренара ветеринара
ветеренару ветеринару
ветиренару ветеринару
ветеренаром ветеринаром
ветиренаром ветеринаром
ветеренары ветеринары
ветиренары ветеринары
расслоилось разделилось на слои
разслоилось разделилось на слои
дегтеобразное похожее на деготь
дегтеобразная похожая на деготь
кремообразное похожее на крем
кремообразная похожая на крем
волосики волосы
залысина лысина
облазит линяет
уменя у меня
делоть делать
дилоть делать
дилать делать
зади сзади
взади сзади
взаде сзади
какба как-бы
какбы как-бы
прошупывается прощупывается
прашупывается прощупывается
пращупывается прощупывается
клещь клещ
клешь клещ
клеш клещ
клещь клещ
клещем клещ
клешем клещ
рвотная рвотный
тужится напрягается
тужиться напрягаться
какает испражняется
срет испражняется
срёт испражняется
дрищет испражняется
запоносил начал поносить
дристать поносить
подсохло высохло
нарывать опухать
оттекла отекла
отекшее опухшее
отёкшее опухшее
припух опух
припухло опухло
припухла опухла
опухшая набухшая
апухшая набухшая
вздувает раздувает
воспаленное поврежденное
вспухшие опухшие
расперло опухло
зашибла ушибла
припухлостей шишек
припухлостями шишками
припухлостям шишкам
припухлостях шишках
припушлостям шишкам
покраснений красноты
жидковат жидкий
жидковатый жидкий
жидковато жидко
жиденький жидкий
животина животное
животины животного
животине животному
животиной животным
животиною животным
температурит имеет повышенную температуру
темпиратурит имеет повышенную температуру
тимпературит имеет повышенную температуру
тимпиратурит имеет повышенную температуру
температурить иметь повышенную температуру
темпиратурить иметь повышенную температуру
тимпиратурить иметь повышенную температуру
тимпературить иметь повышенную температуру
покашливает кашляет
подкашливает кашляет
покашливают кашляют
подкашливают кашляют
откашливаются кашляют
покашливал кашлял
подкашливал кашлял
покашливали кашляли
подкашливали кашляли
откашливались кашляли
"""
def log_loss_score(prediction, ground_truth):
log_loss_ = 0
ground_truth = np.array(ground_truth)
for i in range(10):
log_loss_ += log_loss(ground_truth[:, i], prediction[:, i])
return log_loss_ / 10
def main():
glove = GloveModel(substitutions=SUBSTITUTIONS, log=log)
log.info('Loading train and test datasets...')
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
log.info(f'Loaded train: {len(train)} records, and test: {len(test)} records')
"""
    # Text processing
    Convert a text of arbitrary length into a fixed-length set of features.
    To do this, we pick some keywords or phrases, e.g. disease symptoms,
    and find the corresponding embeddings for them.
    The embeddings of each phrase are merged into a single embedding by averaging the vectors
    (a sum would also work, it does not really matter).
    These keywords, these symptoms, act as "beacons", or reference points if you like.
    Each text description of *indefinite length* is replaced by a *finite* set of distances to these beacons.
    1. When analysing a text, we split it into **tokens**: words and punctuation marks.
    2. For each token we look up its **embedding**; 'unk' for unknown words, 'num' for numbers.
    3. We then compute the **distance** from this embedding to all of the anchor embeddings.
       The Euclidean distance is uninformative in a high-dimensional space,
       but there are many kinds of distances; we compute four of them:
       ``(cosine, cityblock, euclidean, braycurtis)``
    4. For the whole text we keep only the **smallest** distance to each keyword.
    5. Thus, from a text of unlimited length we obtain just a **fixed set**
       of minimal distances to the keywords.
"""
# Symptoms keywords (or phrases)
anchors = [
'кокцидии', 'абсцесс', 'диспепсия', 'гельминтоз', 'мастит', 'ринотрахеит', 'отёк вымени',
'воспаление сухожилия', 'острая инфекция', 'лишай',
'вымя', 'сосок', 'доить', 'температура', 'шишка', 'понос', 'запор', 'кал с кровью',
'краснота', 'слабость', 'вонь', 'буйный', 'не кушает', 'не даёт молоко', 'пьет мочу',
'не ходит', 'не встает', 'хромает', 'орёт', 'кашляет', 'чихает', 'глаза слезятся',
'идет пена', 'пахнет аммиаком', 'после отёла', 'вялость', 'аборт', 'свищ', 'гной из раны',
'кровавая моча', 'выделения из носа', 'рвота', 'истощение', 'судороги', 'расширенные зрачки'
]
anchor_embeddings = [np.mean(np.stack(glove[target]), axis=0) for target in anchors]
distance_functions = (cosine, cityblock, euclidean, braycurtis)
def embedings2features(text_embeddings: List[np.ndarray]) -> pd.Series:
result = OrderedDict()
for embedding in text_embeddings:
for anchor_embedding, anchor in zip(anchor_embeddings, anchors):
anchor = '_'.join(anchor.split())
for dist_func in distance_functions:
feature_name = f'{anchor}_{dist_func.__name__}'
dist = float(dist_func(embedding, anchor_embedding))
if feature_name not in result:
result[feature_name] = dist
else:
result[feature_name] = min(dist, result[feature_name])
return pd.Series(result)
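# Tiny self-contained illustration (an assumption, not part of the original notebook)
# of the idea described above: each "text" is reduced to the minimum distance from any
# of its token vectors to each anchor vector, giving a fixed-length feature set
# regardless of text length. 3-d toy vectors stand in for real GloVe embeddings.
if __name__ == '__main__':
    from scipy.spatial.distance import cosine as _cos, cityblock as _cb
    _toy_anchors = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    _toy_tokens = np.array([[0.9, 0.1, 0.0], [0.1, 0.8, 0.1], [0.3, 0.3, 0.4]])
    _toy_feats = {}
    for _i, _anchor_vec in enumerate(_toy_anchors):
        for _dist in (_cos, _cb):
            # keep only the smallest distance over all tokens of the "text"
            _toy_feats[f'anchor{_i}_{_dist.__name__}'] = min(
                float(_dist(_t, _anchor_vec)) for _t in _toy_tokens)
    print(_toy_feats)  # 2 anchors x 2 metrics -> 4 fixed-length features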
def embedings2distances(text_embeddings: List[np.ndarray]) -> Tuple[pd.Series, np.ndarray]:
result = OrderedDict()
distances = []
for embedding in text_embeddings:
D = np.ones((len(anchor_embeddings), len(distance_functions)), dtype=np.float32) * np.inf
for i, (anchor_embedding, anchor) in enumerate(zip(anchor_embeddings, anchors)):
anchor = '_'.join(anchor.split())
for j, dist_func in enumerate(distance_functions):
feature_name = f'{anchor}_{dist_func.__name__}'
dist = float(dist_func(embedding, anchor_embedding))
D[i, j] = dist
if feature_name not in result:
result[feature_name] = dist
else:
result[feature_name] = min(dist, result[feature_name])
distances.append(D)
    return pd.Series(result), np.stack(distances)
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/8 22:08
Desc: Jin10 Data Center - economic indicators - USA
https://datacenter.jin10.com/economic
"""
import json
import time
import pandas as pd
import demjson
import requests
from akshare.economic.cons import (
JS_USA_NON_FARM_URL,
JS_USA_UNEMPLOYMENT_RATE_URL,
JS_USA_EIA_CRUDE_URL,
JS_USA_INITIAL_JOBLESS_URL,
JS_USA_CORE_PCE_PRICE_URL,
JS_USA_CPI_MONTHLY_URL,
JS_USA_LMCI_URL,
JS_USA_ADP_NONFARM_URL,
JS_USA_GDP_MONTHLY_URL,
)
# Eastmoney - USA - pending home sales MoM
def macro_usa_phs():
    """
    US pending home sales month-over-month rate
    http://data.eastmoney.com/cjsj/foreign_0_5.html
    :return: US pending home sales MoM rate
    :rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
'type': 'GJZB',
'sty': 'HKZB',
'js': '({data:[(x)],pages:(pc)})',
'p': '1',
'ps': '2000',
'mkt': '0',
'stat': '5',
'pageNo': '1',
'pageNum': '1',
'_': '1625474966006'
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(',') for item in data_json['data']])
temp_df.columns = [
'时间',
'前值',
'现值',
'发布日期',
]
temp_df['前值'] = pd.to_numeric(temp_df['前值'])
temp_df['现值'] = pd.to_numeric(temp_df['现值'])
return temp_df
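# Illustrative usage sketch (an addition, not part of the original module);
# requires network access to the eastmoney endpoint above.
if __name__ == "__main__":
    macro_usa_phs_df = macro_usa_phs()
    print(macro_usa_phs_df.tail())  # columns: 时间, 前值, 现值, 发布日期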
# Jin10 Data Center - economic indicators - USA - economic conditions - US GDP
def macro_usa_gdp_monthly():
    """
    US gross domestic product (GDP) report; data available from 20080228 to the present
    https://datacenter.jin10.com/reportType/dc_usa_gdp
    :return: pandas.Series
2008-02-28 0.6
2008-03-27 0.6
2008-04-30 0.9
2008-06-26 1
2008-07-31 1.9
...
2019-06-27 3.1
2019-07-26 2.1
2019-08-29 2
2019-09-26 2
2019-10-30 0
"""
t = time.time()
res = requests.get(
JS_USA_GDP_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国国内生产总值(GDP)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "53",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gdp"
temp_df = temp_df.astype("float")
return temp_df
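# Illustrative usage sketch (an addition, not in the original source): the function
# above merges the cached jin10 JS dump with the live list_v2 API, de-duplicates by
# release date and returns a float Series named "gdp". Requires network access.
if __name__ == "__main__":
    gdp_series = macro_usa_gdp_monthly()
    print(gdp_series.tail())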
# Jin10 Data Center - economic indicators - USA - price level - US CPI monthly rate report
def macro_usa_cpi_monthly():
    """
    US CPI month-over-month report; data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_cpi_all.js?v=1578741110
    :return: US CPI monthly rate report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CPI_MONTHLY_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国居民消费价格指数(CPI)(月环比)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "9",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "cpi_monthly"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - price level - US core CPI monthly rate report
def macro_usa_core_cpi_monthly():
    """
    US core CPI month-over-month report; data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_cpi
    https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v=1578740570
    :return: US core CPI monthly rate report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_core_cpi_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心CPI月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "6",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_core_cpi"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - price level - US personal spending monthly rate report
def macro_usa_personal_spending():
    """
    US personal spending month-over-month report; data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_personal_spending
    https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v=1578741327
    :return: US personal spending monthly rate report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_personal_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国个人支出月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "35",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_personal_spending"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - price level - US retail sales monthly rate report
def macro_usa_retail_sales():
    """
    US retail sales month-over-month report; data available from 19920301 to the present
    https://datacenter.jin10.com/reportType/dc_usa_retail_sales
    https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v=1578741528
    :return: US retail sales monthly rate report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_retail_sales_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国零售销售月率报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "39",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_retail_sales"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - price level - US import price index report
def macro_usa_import_price():
    """
    US import price index report; data available from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_import_price
    https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v=1578741716
    :return: US import price index report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_import_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国进口物价指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "18",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_import_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - price level - US export price index report
def macro_usa_export_price():
    """
    US export price index report; data available from 19890201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_export_price
    https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v=1578741832
    :return: US export price index report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_export_price_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国出口价格指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "79",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_export_price"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - labour market - LMCI
def macro_usa_lmci():
    """
    Fed labor market conditions index (LMCI) report; data available from 20141006 to the present
    https://datacenter.jin10.com/reportType/dc_usa_lmci
    https://cdn.jin10.com/dc/reports/dc_usa_lmci_all.js?v=1578742043
    :return: Fed labor market conditions index report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_LMCI_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美联储劳动力市场状况指数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "93",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "lmci"
temp_df = temp_df.astype("float")
return temp_df
# Jin10 Data Center - economic indicators - USA - labour market - unemployment rate - US unemployment rate report
def macro_usa_unemployment_rate():
    """
    US unemployment rate report; data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_unemployment_rate
    https://cdn.jin10.com/dc/reports/dc_usa_unemployment_rate_all.js?v=1578821511
    :return: US unemployment rate report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_UNEMPLOYMENT_RATE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国失业率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "47",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "unemployment_rate"
return temp_df
# Jin10 Data Center - economic indicators - USA - labour market - unemployment rate - US Challenger job cuts report
def macro_usa_job_cuts():
    """
    US Challenger job cuts report; data available from 19940201 to the present
    https://datacenter.jin10.com/reportType/dc_usa_job_cuts
    https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v=1578742262
    :return: US Challenger job cuts report - current value (10k persons)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_job_cuts_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国挑战者企业裁员人数报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "78",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_job_cuts"
return temp_df
# Jin10 Data Center - economic indicators - USA - labour market - employment - US non-farm payrolls report
def macro_usa_non_farm():
    """
    US non-farm payrolls report; data available from 19700102 to the present
    https://datacenter.jin10.com/reportType/dc_nonfarm_payrolls
    https://cdn.jin10.com/dc/reports/dc_nonfarm_payrolls_all.js?v=1578742490
    :return: US non-farm payrolls report - current value (10k persons)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_NON_FARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国非农就业人数"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "33",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "non_farm"
return temp_df
# Jin10 Data Center - economic indicators - USA - labour market - employment - US ADP employment report
def macro_usa_adp_employment():
    """
    US ADP employment report; data available from 20010601 to the present
    https://datacenter.jin10.com/reportType/dc_adp_nonfarm_employment
    https://cdn.jin10.com/dc/reports/dc_adp_nonfarm_employment_all.js?v=1578742564
    :return: US ADP employment report - current value (10k persons)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_ADP_NONFARM_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国ADP就业人数(万人)"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(万人)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "adp"
return temp_df
# Jin10 Data Center - economic indicators - USA - consumer income & spending - US core PCE price index YoY report
def macro_usa_core_pce_price():
    """
    US core PCE price index year-over-year report; data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_core_pce_price
    https://cdn.jin10.com/dc/reports/dc_usa_core_pce_price_all.js?v=1578742641
    :return: US core PCE price index YoY report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
JS_USA_CORE_PCE_PRICE_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国核心PCE物价指数年率"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "80",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "core_pce_price"
return temp_df
# Jin10 Data Center - economic indicators - USA - consumer income & spending - US real personal consumption QoQ (initial) report
def macro_usa_real_consumer_spending():
    """
    US real personal consumption expenditure QoQ (initial) report; data available from 20131107 to the present
    https://datacenter.jin10.com/reportType/dc_usa_real_consumer_spending
    https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v=1578742802
    :return: US real personal consumption QoQ (initial) report - current value (%)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_real_consumer_spending_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国实际个人消费支出季率初值报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "81",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df = temp_df.astype("float")
temp_df.name = "usa_real_consumer_spending"
return temp_df
# Jin10 Data Center - economic indicators - USA - trade - US trade balance report
def macro_usa_trade_balance():
    """
    US trade balance report; data available from 19700101 to the present
    https://datacenter.jin10.com/reportType/dc_usa_trade_balance
    https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v=1578742911
    :return: US trade balance report - current value (100 million USD)
    :rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_trade_balance_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国贸易帐报告"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(亿美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"category": "ec",
"attr_id": "42",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
    temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
    temp_se = temp_se.iloc[:, 1]
    temp_df = temp_df.append(temp_se)
    temp_df.dropna(inplace=True)
    temp_df.sort_index(inplace=True)
    temp_df = temp_df.reset_index()
    temp_df.drop_duplicates(subset="index", inplace=True)
    temp_df.set_index("index", inplace=True)
    temp_df = temp_df.squeeze()
    temp_df.index.name = None
    temp_df = temp_df.astype("float")
    temp_df.name = "usa_trade_balance"
    return temp_df
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from spandex.targets import scaling as scl
@pytest.fixture(scope='module')
def col():
return pd.Series([1, 2, 3, 4, 5])
@pytest.fixture(scope='module')
def target_col():
return 'target_col'
@pytest.fixture(scope='module')
def df(target_col):
# a b a b a b a b a b
l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
return pd.DataFrame(
{target_col: l,
'geo_id': ['a', 'b'] * 5,
'filter_col': [x + 100 for x in l]})
@pytest.mark.parametrize('metric', ['mean', 'median'])
def test_scale_col_to_target_mean_median(col, metric):
target = 600
expected = pd.Series([200, 400, 600, 800, 1000])
result = scl.scale_col_to_target(col, target, metric=metric)
assert getattr(result, metric)() == target
pdt.assert_series_equal(result, expected, check_dtype=False)
def test_scale_col_to_target_sum(col):
target = 16
expected = col * target / col.sum()
result = scl.scale_col_to_target(col, target, metric='sum')
assert result.sum() == target
pdt.assert_series_equal(result, expected)
def test_scale_col_to_target_clip(col):
target = 600
clip_low = 450
clip_high = 999
expected = pd.Series([450, 450, 600, 800, 999])
result = scl.scale_col_to_target(
col, target, metric='mean', clip_low=clip_low, clip_high=clip_high)
pdt.assert_series_equal(result, expected, check_dtype=False)
def test_scale_col_to_target_round(col):
target = 16
result = scl.scale_col_to_target(
col, target, metric='sum', int_result=True)
pdt.assert_series_equal(result, col)
def test_scale_to_targets(df, target_col):
targets = [100, 1000]
filters = [['geo_id == "a"', 'filter_col < 106'], 'geo_id == "b"']
metric = 'sum'
result = scl.scale_to_targets(df, target_col, targets, metric, filters)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series(
[11.11111111, 66.66666667, 33.33333333, 133.33333333, 55.55555556,
200, 7, 266.66666667, 9, 333.33333333]),
check_dtype=False)
def test_scale_to_targets_no_segments(df, target_col):
target = [1000]
metric = 'mean'
result = scl.scale_to_targets(df, target_col, target, metric=metric)
pdt.assert_index_equal(result.columns, df.columns)
pdt.assert_series_equal(
result[target_col],
pd.Series(
[181.81818182, 363.63636364, 545.45454545, 727.27272727,
909.09090909, 1090.90909091, 1272.72727273, 1454.54545455,
1636.36363636, 1818.18181818]),
check_dtype=False)
def test_scale_to_targets_clip_int(df, target_col):
target = [1000]
metric = 'mean'
clip_low = 400
clip_high = 999.99
int_result = True
result = scl.scale_to_targets(
df, target_col, target, metric, clip_low=clip_low, clip_high=clip_high,
int_result=int_result)
    pdt.assert_index_equal(result.columns, df.columns)
#%%
# ANCHOR IMPORTS
import sys
import pandas as pd, numpy as np
import pickle
import re
from sklearn import feature_extraction , feature_selection
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import Normalizer
from tqdm.autonotebook import trange, tqdm
import swifter
# Libraries for feature engineering.
import string
from collections import Counter # not necessary?
#from nnsplit import NNSplit
import spacy# .tokenizer.tokenize
from spellchecker import SpellChecker
# Other neat features.
from nltk.metrics.distance import edit_distance
from lexicalrichness import LexicalRichness
import syllables
import itertools
import textstat
# Stats
from scipy.stats import chisquare
#from statistics import mean
#%% Get spacy docs and save them to data to speed up development.
def get_docs(data, text_col='text_clean'):
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter")
    data['docs'] = data[text_col].apply(lambda x: nlp(x))
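#%% Illustrative smoke test for get_docs (an addition, not in the original script);
# requires the en_core_web_sm model to be installed.
if __name__ == '__main__':
    _demo_df = pd.DataFrame({'text_clean': ['This is a tweet.', 'Another short one!']})
    get_docs(_demo_df)
    print(_demo_df['docs'].apply(len))  # number of spaCy tokens per text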
#%%
def listify(series, feature_name=str):
return [{feature_name: x[1]} for x in series.items()]
#%%
# Extract Baseline feature
# Character trigrams (morphological/lexical/semantic?).
def ngrams(train, test, params):
"""Extract character ngrams.
Args:
train (list): list of texts to fit the vectorizer.
test (list): list of texts to transform to feature space.
params (dict): parameters for the vectorizer construction
Returns:
        tuple: (fitted CountVectorizer, train document-term matrix, test document-term matrix)
"""
vectorizer = CountVectorizer(lowercase=params['ngrams']['lowercase'],
ngram_range=params['ngrams']['size'], # experiment with ranges, e.g. ngram_range=(3,3)
analyzer=params['ngrams']['type'], #, also try "char_wb"
max_features=params['ngrams']['max_vocab']) # max_features=10000
# fit count vecotorizer to preprocessed tweets.
#vectorizer.fit(train)
# Transform into input vectors for train and test data.
train_vectors = vectorizer.fit_transform(train) # using fit_transform due to better implementation.
#train_vectors = vectorizer.transform(train) #.toarray()
test_vectors = vectorizer.transform(test) #.toarray()
# Inspect with vectorizer.get_feature_names() and .toarray()
#inverse = vectorizer.inverse_transform(train)
#feature_names = vectorizer.get_feature_names()
#print(f'Train ({type(train_vectors)}) feature matrix has shape: {train_vectors.shape}')
#print(f'Test ({type(test_vectors)}) feature matrix has shape: {test_vectors.shape}')
#return vectorizer
return vectorizer, train_vectors , test_vectors
#return inverse
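#%% Illustrative call of ngrams() (an addition, not in the original script); the
# params layout below mirrors the keys the function reads and is otherwise assumed.
if __name__ == '__main__':
    _demo_params = {'ngrams': {'lowercase': True, 'size': (3, 3),
                               'type': 'char_wb', 'max_vocab': 5000}}
    _vec, _X_train, _X_test = ngrams(['a first tweet', 'a second tweet'],
                                     ['an unseen tweet'], _demo_params)
    print(_X_train.shape, _X_test.shape)  # sparse document-term matrices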
#%% ANCHOR EXTRACT LIWC
def parse_liwc(file, **args):
"""Parse a (left) aligned version of the LIWC lexicon.
Args:
file (str): filepath to lexcion (excel).
Returns:
DataFrame: df or dict
"""
df = pd.read_excel(file, skiprows=2)
# Handling merged columns in file
### Adapted from https://stackoverflow.com/a/64179518 ###
df.columns = df.columns.to_series()\
.replace('Unnamed:\s\d+', np.nan, regex=True).ffill().values
# Multindex to represent multiple columns for some categories.
df.columns = pd.MultiIndex.from_tuples([(x, y)for x, y in
zip(df.columns, df.columns.to_series().groupby(level=0).cumcount())])
### Accessed 26-04-2021 ###
# d = data.to_dict(orient='list')
### Adapted from https://stackoverflow.com/a/50082926
# dm = data.melt()
# data = dm.set_index(['variable', dm.groupby('variable').cumcount()]).sort_index()['value'].unstack(0)
### Accessed 26-04-2021 ###
# Concat the terms by column.
# d = dict()
#d = {column: value for key, value in dd.items()}
# for ki, wl in dd.items():
# nl = []
# k, i = ki
# # for w in wl:
# # if w not in nl:
# # d[k].append(wl)
# if k in d:
# d[k].append(wl)
# else:
# d[k] = wl
### Solution from https://stackoverflow.com/a/48298420 ###
    # TODO experiment with not sorting the index, or re-sorting columns to match the multiindex or the original df.columns.
df = df.stack().sort_index(level=1).reset_index(drop=True)
### Accessed 26-04-2021 ###
# Check that merged columns have the right number of terms.
# sum(isinstance(x, str) for x in terms['Funct'])
return df.to_dict(orient='list')
#%%
# Extract LIWC matches (lexical/semantic)
def liwc_match(parsed, d, extract=False, text_col='text_clean'):
"""Search a corpus for matches against LIWC (2007) categories.
Args:
parsed (DataFrame): a pandas df with the all categories of LIWC prepared.
d (str): a filepath to a pickle file with a corpus to search.
extract (bool, optional): Switch specifying whether or not to return a Dict for feature extraction or feature inspection/analysis. Defaults to False.
Returns:
dict: a dict with {liwc_cat1...n : count} for each datapoint in the corpus OR a dict a, a dataFrame and a Series with results of searching the categories against the matches (absolute counts per datapoint (as dict and DF) totals per category (Series)).
"""
# load data to search.
# Could do Series.count(regex) or df[clean_text] -> (joined) list?
if isinstance(d, pd.DataFrame) == False: # the ... analysis case.
data = pd.read_pickle(d)
text = list(d) # a single row/tweet?
if extract == True: # The extract case
data = d
text = data[text_col]
# Dict for search results.
results = dict()
pats = dict() # save patterns to dict for debugging.
# Loop through category-termlist pairs.
for cat, terms in tqdm(parsed.items()):
# Remove nans from term lists.
terms = [term.strip(' ') for term in terms if isinstance(term, str)]
# Compile re pattern from term list.
#pat = re.compile('|'.join(terms), flags=re.MULTILINE)
#pat = re.compile('|'.join(
# [r'\b' + t[:-1] if t.endswith('*') else r'\b' + t + r'\b' for t in #terms]))
### Adapted from https://stackoverflow.com/a/65140193 ###
pat = re.compile('|'.join([r'\b' + t[:-1] + r'\w*' if t.endswith('*') else r'\b' + t + r'\b' for t in terms]) , flags=re.MULTILINE | re.IGNORECASE)
### Accessed 27-04-2021 ###
pats[cat] = pat
#i, char = enumerate(j_terms)
# for term in terms:
# i = 0
# try:
# pat = re.compile(term)
# #print(pat, counter,'\n')
# i +=1
# except:
# print('error here:\n'.upper(),pat, i)
# Aggregate matches per category into dict. storing tweet id's preserved in the source data.
#results[cat] = pat.finditer(text.values)
# For that, join values into list of lists -> re.match -> see below
# results[cat][re.match(pat)] = re.finditer(pat, row_list)
# if extract == True: You can't normalize since this isn't tokenized.
# results[cat] = text.apply(lambda x: x.str.count(pat) / len(x))
# else:
results[cat] = text.str.count(pat)
#results[cat] = text.swifter.apply(lambda x: re.finditer(pat, x))
# Store results in DataFrame
df_results = pd.DataFrame.from_dict(results)
# Totals per category
df_totals = df_results.sum().sort_values(ascending=False)
if extract == True:
# Export results to {index : {cat : count}...} for easy vectorization.
results_per_row = df_results.to_dict(orient='records') # or orient='index'? -> DictVectorizer
return results_per_row
return {'results' :
{'matches_dict' : results,
'matches_df' : df_results,
'matches_total': df_totals
},
'regex_pats' : pats
}
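#%% Illustrative call of liwc_match() with a tiny hand-made lexicon (an addition,
# not the real LIWC dictionary); shows the per-row {category: count} records
# returned when extract=True.
if __name__ == '__main__':
    _toy_lexicon = {'Posemo': ['good', 'happ*'], 'Negemo': ['bad', 'sad']}
    _toy_data = pd.DataFrame({'text_clean': ['a good and happy day', 'a bad day']})
    print(liwc_match(_toy_lexicon, _toy_data, extract=True))
    # e.g. [{'Posemo': 2, 'Negemo': 0}, {'Posemo': 0, 'Negemo': 1}]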
#%%
def norm_freqs(data, expression, count_name=str, normalize=True, analyze=True):
"""Get frequencies (normalized = optional) of a regex pattern in a Series with one or more strings.
Args:
data (DataFrame): a dataframe with texts to extract frequencies from.
expression (re.compile): a regex pattern to count occurrences of in each text.
count_name (str, optional): a name for the counted feature. Defaults to str.
normalize (bool, optional): [description]. Defaults to True.
Returns:
list: list of dicts with key = frequency name, value = frequency.
"""
# List to store frequencies
# freqList = list()
# Loop through each entry in the list of strings.
# for e in stringList:
# # Join to a regular string
# text = ' '.join(e)
# # Construct a dict for each entry with freuncies.
# c = {count_name : len([char for char in text if char in expression])}
# Get frequencies of a regex in a pandas column, normalize if set to True.
c = data.apply(lambda x: len(re.findall(
expression, x))/len(x) if normalize == True else len(re.findall(expression, x)))
### Adapted from https://stackoverflow.com/a/45452966 ###
# Cast frequencies Series to list of dicts.
cList = [{count_name: x[1]} for x in c.items()]
### Accessed 10-05-2021 ###
if analyze == True:
return cList
else:
return c
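#%% Illustrative call of norm_freqs() (an addition, not in the original script):
# normalised digit frequency per text, returned as a list of dicts ready for
# DictVectorizer when analyze=True.
if __name__ == '__main__':
    _texts = pd.Series(['call me on 12345', 'no digits here'])
    print(norm_freqs(_texts, re.compile(r'\d'), count_name='digit_freq', normalize=True))
    # e.g. [{'digit_freq': 0.3125}, {'digit_freq': 0.0}]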
def binary_freq(data, expression, feature_name=str, analyze=True):
"""Search data for occurrences of a binary feature as a regex.
Args:
data (pd.Series): a series with text instances.
expression (re.compile): a regex or string to search for.
feature_name (str, optional): a name for the feature to extract. Defaults to str.
Returns:
list: a list with a dict mapping feature name to 1 or 0 (true/false) based on occurrence in texts.
"""
b = data.str.contains(expression).astype(int) # cast bools to 0/1
if analyze == True:
bList = [{feature_name: x[1]} for x in b.items()]
return bList
else:
return b
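#%% Illustrative call of binary_freq() (an addition, not in the original script):
# flags texts that contain repeated exclamation marks.
if __name__ == '__main__':
    _texts = pd.Series(['so cool!!!', 'calm text.'])
    print(binary_freq(_texts, re.compile(r'!{2,}'), feature_name='excl_rep'))
    # e.g. [{'excl_rep': 1}, {'excl_rep': 0}]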
#%% ANCHOR extract character and word level features
# Extract character-level features (lexical/morphological).
def get_cl(data, text_col='text_clean', analyze=True):
# 0. Cast data text col .to_list()
# 1. Normalized punctation frequency.
# # Using pandas instead of lists + counter + dicts.
# df_results = pd.DataFrame({'text': textList})
# #p_pat = re.compile(r'[!"\$%&\'()*+,\-.\/:;=#@?\[\\\]^_`{|}~]*')
# p_pat = re.compile(re.escape(string.punctuation))
# df_results['punct'] = df_results.text.str.count(p_pat)
# the whole series
#train['text_clean'].str.count(p_pat)
df_punc_freq = data[text_col].apply(lambda x: len([char for char in ' '.join(x) if char in string.punctuation]) / len(' '.join(x)))
#return punc_freq, df_punc_freq
#df_punc_freq = pd.DataFrame.from_records(punc_freq)
# Add to cl dict.
#cl_results['punc_freq'] = punc_freq
#2. Specific characters (also normalized)
# 2.1 digits
d_pat = re.compile(r'\d' , re.M)
df_digits = norm_freqs(data[text_col], d_pat, count_name='digit_freq',normalize=True, analyze=False)
#return df_digits
# 2.2 Whitespace chars.
ws_pat = re.compile(r' ', re.M) # NOTE just using actual whitespace instead of \s
df_whitespaces = norm_freqs(data[text_col], ws_pat, count_name='whitespace_freq', normalize=True, analyze=False)
# 2.3 tab characters NOTE Doesn't occur in either corpus.
# tab_pat = re.compile(r'\t', re.M)
# tabs = norm_freqs(data[text_col], tab_pat, count_name='tab_freqs', normalize=True)
# 2.4 line break characters
br_pat = re.compile(r'[\r\n\f]', re.M)
df_lbreaks = norm_freqs(data[text_col], br_pat, count_name='line_break_freq', normalize=True, analyze=False)
# 2.5 Upperchase chars (per all chars)
up_pat = re.compile(r'[A-Z]', re.M) # Decide whether to be greedy about *all* uppercase chars or to be lazy (below). Also, @USER mentions are counted now. Can be excluded with \b(?!USER\b)[A-Z]. Try doing [^a-z\W] - caret negates the range of chars.
#up_pat = re.compile(r'(?<![a-z])*[A-Z](?![a-z])*' , re.M) # Only count chars if they are not a one-off in the beginning of words.
df_upchars = norm_freqs(data[text_col], up_pat, count_name= 'upper_char_freq', normalize=True, analyze=False)
    # 2.6 Special chars other than punctuation. NOTE Doesn't make much sense when using a full punctuation set.
spc_pat = re.compile(r"[^a-z \.,!?':;\s]", re.M)
df_spc = norm_freqs(data[text_col], spc_pat, count_name="special_characters", analyze=False)
#3. Repeated characters (binary features) # NOTE if you want counts of each repeated char, consider just defining it with regexes and then using norm_freqs, normalize=False?
# 3.1 question marks
quest_pat = re.compile(r'\?{2,}', re.M)
df_rep_quest = binary_freq(data[text_col] , quest_pat, feature_name='quest_rep', analyze=False)
# 3.2 periods (ellipsis)
per_pat = re.compile(r'\.{2,}', re.M)
df_rep_per = binary_freq(data[text_col] , per_pat, feature_name='period_rep', analyze=False)
# 3.3 exclamation marks
excl_pat = re.compile(r'!{2,}', re.M)
df_rep_excl = binary_freq(data[text_col] , excl_pat, feature_name='excl_rep', analyze=False)
# 4 Contains equal signs
eq_pat = re.compile(r'=', re.M)
df_equals = binary_freq(data[text_col] , eq_pat , feature_name='equals', analyze=False)
# 5 Quotes in chars
#quotes = data[text_col].apply(lambda x: len(re.findall(quot_pat, x)) / len(x)) # per character --- works.
#quotes_char = [{'quotes' : x[1]} for x in qoutes.items()]
if analyze == True:
#punc_freq = listify(df_punc_freq, feature_name='char_punc_freq') # new Alternative to punc_freq with dict comprehension.
textList = data[text_col].to_list()
### Old approach to punc_freqs for analysis.
cl_results = dict() # dict to store results.
punc_freq = list()
for e in textList:
text = ' '.join(e)
# Build dict with counts of all punct characters.
# The first c example does it per punctuation character, the second for all.
# Each count is normalized by total number of chars in the each string.
# NOTE not using regexes here. Single quotes/apostrophes/contractions are counted as well.
#c = {char:count/len(text) for char, count in Counter(text).items() #if char in string.punctuation}
# This should generalize to regex matches.
c = {'char_punc_freq': len([char for char in text if char in string.punctuation])/len(text)}
punc_freq.append(c)
digits = norm_freqs(data[text_col], d_pat, count_name='digit_freq',normalize=True)
whitespaces = norm_freqs(data[text_col], ws_pat, count_name='whitespace_freq', normalize=True)
lbreaks = norm_freqs(data[text_col], br_pat, count_name='line_break_freq', normalize=True)
upchars = norm_freqs(data[text_col], up_pat, count_name= 'upper_char_freq', normalize=True)
spc = norm_freqs(data[text_col], spc_pat, count_name="special_characters")
rep_quest = binary_freq(data[text_col] , quest_pat, feature_name='quest_rep')
rep_per = binary_freq(data[text_col] , per_pat, feature_name='period_rep')
rep_excl = binary_freq(data[text_col] , excl_pat, feature_name='excl_rep')
equals = binary_freq(data[text_col] , eq_pat , feature_name='equals')
# Store results
cl_results['char_punc_freq'] = punc_freq
cl_results['digit_freq'] = digits
cl_results['whitespace_freq'] = whitespaces
#cl_results['tab_freq'] = tabs does not occur in either corpus.
cl_results['linebreak_freq'] = lbreaks
cl_results['uppercased_char_freq'] = upchars
cl_results['special_char_freq'] = spc
cl_results['repeated_questionmark'] = rep_quest
cl_results['repeated_periods'] = rep_per
cl_results['repeated_exclamation'] = rep_excl
cl_results['contains_equals'] = equals
return cl_results #punc_freq # (punc_freq , cl_results)
# Store results as df for much easier vectorization...
else:
cl_results_df = pd.DataFrame()
cl_results_df['char_punc_freq'] = df_punc_freq #✅
#pd.concat(cl_results_df)
# Store results
cl_results_df['digit_freq'] = df_digits #✅
cl_results_df['whitespace_freq'] = df_whitespaces #✅
#cl_results['tab_freq'] = tabs does not occur in either corpus.
cl_results_df['linebreak_freq'] = df_lbreaks #✅
cl_results_df['uppercased_char_freq'] = df_upchars #✅
cl_results_df['special_char_freq'] = df_spc #✅
cl_results_df['repeated_questionmark'] = df_rep_quest #✅
cl_results_df['repeated_periods'] = df_rep_per #✅
cl_results_df['repeated_exclamation'] = df_rep_excl #✅
cl_results_df['contains_equals'] = df_equals #✅
return cl_results_df
#%%
# Debugging
# test_df = train.iloc[:50,:]
# test = get_cl(test_df, text_col='text_clean', analyze=False)
# Extract word-level features (lexical/morphological)
def get_wl(data, text_col='text_clean', analyze=False, docs=[]):
# SpaCy pipe for rule based sentence splitting.
#blank_nlp = spacy.blank('en') # spacy.load('en_core_web_sm')
# sentencizer = blank_nlp.add_pipe("sentencizer")
# morphologizer = blank_nlp.add_pipe('morphologizer')
# blank_nlp.initialize() #
# print(nlp.pipe_names)
print('Configuring spacy for word level')
nlp = spacy.load('en_core_web_sm', disable=["lemmatizer", 'ner'])
# disable parser in favor of senter and sentencizer due to speed https://spacy.io/models
nlp.disable_pipe("parser")
nlp.enable_pipe("senter")
# Load spellchecker
spell = SpellChecker()
# load exceptions to spellchecker (Twitter, covid specifc)
try:
spell.word_frequency.load_text_file('./utils/spell_additions.txt')
except:
pass
# 1 Get lengths (total/avg words, sentence)
# rewrite features as attributes of Lengths objects?
# class Lengths:
# def __init__(self, first_feat, second_feat):
# pass
#textList = data[text_col].to_list()
wl_results = dict()
# print('TOKENIZING WORD-LEVEL FEATURES')
# data to docs
if len(docs) <= 0:
docs = data[text_col].swifter.apply(lambda x: nlp(x))
#assert len(docs) == len(data[text_col])
# get list of sentences.
sents_c = docs.apply(lambda x: [s for s in x.sents])
# Words only (including numbers and @mentions)
sents_w = docs.apply(lambda x: [[t.text for t in s if\
t.is_punct == False and
t.is_space == False]\
for s in x.sents])
# list of *word* tokens in entire tweet.
toks = docs.apply(lambda x: [t.text for t in x if t.is_punct == False and\
t.is_space == False]) # could have used data['tokens_clean]
# alphabetic tokens only. (for spell checking)
toks_alpha = docs.apply(lambda x: [t.text for t in x if t.is_alpha == True])
# Debugging getting empty lists of alphabetic tokens.
#return pd.DataFrame({'tokens' : toks, 'alpha_tokens': toks_alpha})
toks_morph = docs.apply( lambda x: [t for t in x if t.is_alpha == True])
# print('\n GETTING WORD-LEVEL FEATURES')
# 1.1 total length of tweet in words
# c = {'total_words' : int}
# for doc in docs:
w_total_series = toks.map(len)
# 1.2 avg word length
awl = toks.apply(lambda x: sum(len(w) for w in x) / len(x))
# build dict with keys from list contained in feature_params value for lexical features > word_level. Check if they are there and populate them with the dicts below accordingly. Else don't.
# 1.3.1 avg sentence length (words)
asl_w = sents_w.apply(lambda x: sum(len(s) for s in x) / len(x))
# 1.3.2 avg sentence length (characters)
#asl_c = apply(lambda x: sum([len(''.join(s.text)) for s in x]))
asl_c = sents_c.apply(lambda x: sum(len(''.join(s.text)) for s in x) / len(x))
# 2.1 number of uppercased words.
uws = toks_alpha.apply(lambda x: len([t for t in x if t.isupper() == True]) / len(x) if len(x) > 0 else 0.0)
# 2.2 number of short words
# use len of token <=3
sws = toks_alpha.apply(lambda x: len([t for t in x if len(t) <=3]) / len(x) if len(x) > 0 else 0.0)
# 2.3 number of elongated words
# use regex \b\w{3,}\b
elw_pat = re.compile(r'(\w)\1{2,}', re.M)
elws = toks_alpha.apply(lambda x: len([t for t in x if elw_pat.search(t)]) / len(x) if len(x) > 0 else 0.0)
# 2.4 number of number-like tokens (both digits and numerals)
nss = docs.apply(lambda x: len([t for t in x if t.like_num == True]) / len(x))
# 2.5 frequency of specific verb tenses
pst = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Tense') == ['Past']]).map(len).divide(toks_alpha.map(len))
prs = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Tense') == ['Pres']]).map(len).divide(toks_alpha.map(len)) #NOTE using series.divide instead for if/else check with regular might give a problem with vectorizers.
adj_pos = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Degree') == ['Pos']]).map(len).divide(toks_alpha.map(len))
adj_c_s = toks_morph.apply(lambda x: [t.morph for t in x if t.morph.get('Degree') == ['Cmp'] or t.morph.get('Degree') == ['Sup']]).map(len).divide(toks_alpha.map(len))
# Here you could add future tense, mood etc.
# 2.6 Frequency of OOV words (according to spaCy model)
# token.is_oov
# 3. Frequencies of emotes/jis.
e = data['emotes'].apply(lambda x: len(x[0] + x[1])).divide(toks.map(len)) # normalized by tokens.
# 4. Non-standard spelling. Reconsider including this. It mostly captures proper names and acronyms if it has to be this fast.
sc = toks_alpha.apply(lambda x: spell.unknown(x)).map(len).divide(toks_alpha.map(len))
# 5. number of quoted words
# NOTE normalized by words (in match / in tweet)
quot_pat = re.compile(r"(\".+?\"|\B'.+?'\B)") # matches "double-quoted" and 'single-quoted' spans
#quotes = data[text_col].apply(lambda x: re.findall(quot_pat, x).split(' ')).map(len).divide(toks_alpha.map(len)) # per word (split on whitespace).
print('Tokenizing quote spans')
quotes = data[text_col].swifter.apply(lambda x:
[t for t in nlp(' '.join(re.findall(quot_pat, x))) if t.text.isalnum()]).map(len).divide(toks.map(len))
#return pd.DataFrame({'org_text': data[text_col],'alpha_toks': toks_alpha, 'quoted_toks' : quotes, 'quoted_lens' : quotes_lens})
#quotes = data[text_col].apply(lambda x: re.findall(quot_pat, x)).map(len).divide(toks_alpha.map(len)) # not finished. need to tokenize matches.
#quotes = sents_c.apply(lambda x: len([re.findall(quot_pat, s) for s in x]) / len(x))# per sentence - doesn't work.
# 6. Vocab richness/complexity
# 6.1 Type-token ratio.
tt = toks_alpha.apply(lambda x: len(set(x)) / len(x) if len(x) > 0 else 0.0) # could use Counter instead of set()
# 6.2.1 Hapax legomena
### Adapted from https://stackoverflow.com/a/1801676 ###
hlg = toks_alpha.apply(lambda x: len([word for word, count in Counter(map(str.lower, x)).items() if count == 1]) / len(x) if len(x) > 0 else 0.0) # could also lower with list comprehension.
### accessed 13-05-2021 ###
# 6.2.2 Hapax dislegomena (words that occur twice only)
hdlg = toks_alpha.apply(lambda x: len([word for word, count in Counter(map(str.lower, x)).items() if count == 2]) / len(x) if len(x) > 0 else 0.0)
# Here you would implement complexity measures
#- Brunet's W Measure
#- Yule's K Characteristic
#- Honore's R Measure
#- Sichel's S Measure
#- Simpson's Diversity Index
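# As one hedged example of the measures listed above, Yule's K could be computed from the same
# lowercased counts used for the hapax features (standard form K = 10^4 * (sum_i i^2 * V_i - N) / N^2);
# kept commented out since it is not part of the stored feature set below:
# def yules_k(tokens):
#     counts = Counter(map(str.lower, tokens))
#     n = sum(counts.values())
#     spectrum = Counter(counts.values())  # V_i: number of types occurring i times
#     return 0.0 if n == 0 else 1e4 * (sum(i * i * v for i, v in spectrum.items()) - n) / (n * n)
# yk = toks_alpha.apply(yules_k)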
# 7. syllable frequencies # NOTE: these are averaged/normalized syllable frequencies. The syllables docs suggest using cmudict for accuracy over speed.
sfr = toks_alpha.apply(lambda x: sum([syllables.estimate(w) for w in x]) / len(x) if len(x) > 0 else 0.0) # could also use statistics.mean for all of these averages..
# 8. Readability
# Flesch-Kincaid reading ease
fk = data[text_col].apply(lambda x: textstat.flesch_reading_ease(x))
# # 8.1 Automated Readability Index
# ari = data[text_col].swifter.apply(lambda x: textstat.automated_readability_index(x))
# r_ari = listify(ari, feature_name='automated_readability_index')
# # 8.2 Coleman-Liau index
# cli = data[text_col].swifter.apply(lambda x: textstat.coleman_liau_index(x))
# r_cli = listify(cli, feature_name='coleman_liau_index')
# # 8.3 Dale Chall Readability Index
# dci = data[text_col].swifter.apply(lambda x: textstat.dale_chall_readability_score(x))
# r_dci = listify(dci, feature_name='dale_chall_index')
# # 8.4 Gunning Fog Index
# gfi = data[text_col].swifter.apply(lambda x: textstat.gunning_fog(x))
# r_gfi = listify(gfi, feature_name='gunning_fog_index')
# 8.5 Consensus based on all tests in textstat.
# consensus = data[text_col].swifter.apply(lambda x: textstat.text_standard(x, float_output=True))
# r_consensus = listify(consensus, feature_name='readability_consensus_score')
# Could add basic sentiment with doc.token.sentiment?
# Store results TODO store each list of dicts in separate dict on the same level.
# wl_results = {
# {'length_features' : w_total, w_len_avg, asl_w, asl_c},
# {'specific_w_frequencies' : upper_ws, shortws, elongws, nums, past_freq, pres_freq, adj_positives, adj_cmp_sup ,ems},
# {'nonstandard_spelling' : s_check},
# {'words_in_quotes' : quot_ws},
# {'richess/complexity' : ttr, hlgs, hldgs},
# {'syllable frequencies' : syl_freq},
# {'readability' : r_fk, r_ari, r_cli, r_dci, r_gfi, r_consensus}
# }
# print('\nSTORING RESULTS')
# print('DONE')
if analyze == True:
w_total = [{'len_total_words': x[1]} for x in toks.map(len).items()]
w_len_avg = [{'avg_word_length' : x[1]} for x in awl.items()]
asl_w_avg = [{'avg_sent_len_words': x[1]} for x in asl_w.items()]
asl_c_avg = [{'avg_sent_len_chars' : x[1]} for x in asl_c.items()] # move this to character level.
upper_ws = [{'upper_words': x[1]} for x in uws.items()]
shortws = [{'short_words': x[1]} for x in sws.items()]
elongws = [{'elongated_words' : x[1]} for x in elws.items()]
nums = listify(nss, feature_name='numerical_tokens_frequency')
past_freq = listify(pst, feature_name = 'past_tense_frequency')
pres_freq = listify(prs, feature_name='present_tense_frequency')
adj_positives = listify(adj_pos, feature_name='positive_adjectives')
adj_cmp_sup = listify(adj_c_s, feature_name='comp_and_sup_adjectives')
ems = [{'emote_frequencies': x[1]} for x in e.items()]
s_check = [{'nonstandard_words': x[1]} for x in sc.items()]
quot_ws = listify(quotes, feature_name = 'quotes_in_words')
ttr = [{'type-token_ratio': x[1]} for x in tt.items()]
hlgs = listify(hlg, feature_name= 'hapax_legomena')
hdlgs = listify(hdlg, feature_name='hapax_dislegomena')
syl_freq = [{'avg_syllable_freq': x[1]} for x in sfr.items()]
r_flk = [{'flesch_kincaid_reading_ease' : x[1]} for x in fk.items()]
# Store results in dict.
wl_results['total_word_len'] = w_total
wl_results['avg_word_len'] = w_len_avg
wl_results['avg_sentence_len_words'] = asl_w_avg
wl_results['avg_sentence_len_chars'] = asl_c_avg
wl_results['uppercased_words'] = upper_ws
wl_results['short_words'] = shortws
wl_results['elongated_words'] = elongws
wl_results['numberlike_tokens'] = nums
wl_results['past_tense_words'] = past_freq
wl_results['present_tense_words'] = pres_freq
wl_results['positive_adjectives'] = adj_positives
wl_results['comp_and_sup_adjectives'] = adj_cmp_sup
wl_results['emotes'] = ems
wl_results['nonstandard_spelling'] = s_check # exclude?
wl_results['quoted_words'] = quot_ws
wl_results['type_token_ratio'] = ttr
wl_results['hapax_legomena'] = hlgs
wl_results['hapax_dislegomena'] = hdlgs
wl_results['syllable_freqs'] = syl_freq #takes too long?
wl_results['readability_flesch_kincaid'] = r_flk
# wl_results['readability_ari'] = r_ari
# wl_results['readability_coleman_liau'] = r_cli
# wl_results['readability_dale_chall'] = r_dci
# wl_results['readability_gunning_fog'] = r_gfi
#wl_results['readability_consensus'] = r_consensus
return wl_results
else:
# Build dataframe
wl_results_df = pd.DataFrame()
wl_results_df['total_word_len'] = w_total_series #✅
wl_results_df['avg_word_len'] = awl #✅
wl_results_df['avg_sentence_len_words'] = asl_w #✅
wl_results_df['avg_sentence_len_chars'] = asl_c #✅
wl_results_df['uppercased_words'] = uws #✅
wl_results_df['short_words'] = sws #✅
wl_results_df['elongated_words'] = elws #✅
wl_results_df['numberlike_tokens'] = nss #✅
wl_results_df['past_tense_words'] = pst #✅
wl_results_df['present_tense_words'] = prs #✅
wl_results_df['positive_adjectives'] = adj_pos #✅
wl_results_df['comp_and_sup_adjectives'] = adj_c_s #✅
wl_results_df['emotes'] = e #✅
wl_results_df['nonstandard_spelling'] = sc #✅
wl_results_df['quoted_words'] = quotes # ✅
wl_results_df['type_token_ratio'] = tt #✅
wl_results_df['hapax_legomena'] = hlg #✅
wl_results_df['hapax_dislegomena'] = hdlg #✅
wl_results_df['syllable_freqs'] = sfr #✅
wl_results_df['readability_flesch_kincaid'] = fk #✅
return wl_results_df
#return get_wl(data)#get_cl(data) , get_wl(data)
#%%
# Debugging
# test_df = train.iloc[:50, :]
# test = get_wl(test_df, analyze=False)
# %%
#%%
# Extract sentence-level features (syntactic)
def get_sl(data, text_col = 'text_clean',cv=None , train=False, analyze=False):
# load spacy model.
print('Loading spacy model')
nlp = spacy.load('en_core_web_sm')
nlp.enable_pipe("senter") #TODO Added senter to get_sl while passing on docs for speed.
# For POS tags, you could map a pos tag sequence/vector to the tweet.
# Initialize CountVectorizer for pos ngrams. Alternatively, store pos tags in a separate column and transform with sklearn-pandas per column.
if train == True:
cv = CountVectorizer(analyzer='word', ngram_range=(1,3))
else:
cv = cv
# Retokenize the text
docs = data[text_col].swifter.apply(lambda x: nlp(x))
#toks = docs.apply(lambda x: [t.text for t in x]) # not used.
#return pd.DataFrame({'docs' : docs.map(len) , 'toks': toks.map(len)})
# Frequencies
# 1.1 frequencies of stop words (i.e. function words)
sts = docs.apply(lambda x: len([t.text for t in x if t.is_stop == True]) / len(x)) # normalized by all tokens (including numbers and punct.)
# 1.2 frequencies of punctuation
pnct = docs.apply(lambda x: len([t.text for t in x if t.is_punct == True]) / len(x))
# 1.3 Frequencies of roots (normalized by total number of words in tweet).
rts = docs.apply(lambda x: len([(t, t.dep_) for t in [t for t in x if t.is_space == False] if t.dep_ == 'ROOT']) / len(x)) # This still includes number-like tokens, punctuation and mentions, since these are relevant in the dependency trees. Normalization could account for whitespaces, but doesn't have to.
# 3. POS frequencies.
# Extract pos tags:count (use Counter)
pos = docs.apply(lambda x: [t.pos_ for t in x if t.text.isalnum() == True])
pos_freq = docs.apply(lambda x: {p:c/len([t for t in x if t.text.isalnum() == True]) for p, c in Counter([t.pos_ for t in x if t.text.isalnum() == True ]).items()}) # normalized by alphanumeric tokens (since punctuation frequencies are captured separately).
#pos_freq = [{k:v} for k, v in pfreq.items()]
#return pd.DataFrame({'text' : data[text_col] , 'tokens' : toks, 'pos' : pos})
# 4. POS ngrams (n=uni-bi-tri) - TODO move to ngrams
# join pos tags into strings for CountVectorizer -> return as special case. Do a type check in the lookup or vectorize function that just passes the matrix on. OR pass on POS strings to vectorize in the vectorize function?
#print('fit/transforming posgrams')
pgrams = pos.str.join(' ').to_list()
if train == True:
pgram_matrix = cv.fit_transform(pgrams)
#return cv, pgram_matrix
else:
pgram_matrix = cv.transform(pgrams)
# Sketch of countvectorizing pos ngrams.
#cv.fit_transform(test.str.join(sep=' ').to_list()) # This works. Consider how to get pos ngrams and still keep them interpretable in the corpora - e.g. most frequent triplets? Does that even really tell you anything? You could use Counter or a pandas method to get the most frequent combination.
# {k:v for k, v in Counter(cv.get_feature_names()).items()}
# Note Counter has counter.most_common(n)
# Could use nltk.util.ngrams(sequence, n) as suggested here https://stackoverflow.com/questions/11763613/python-list-of-ngrams-with-frequencies
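# A hedged sketch of that inspection idea (assumes numpy is imported as np at module level;
# use cv.get_feature_names() instead on older scikit-learn versions):
# totals = np.asarray(pgram_matrix.sum(axis=0)).ravel()
# top_idx = totals.argsort()[::-1][:10]
# top_pgrams = [(cv.get_feature_names_out()[i], int(totals[i])) for i in top_idx]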
# 6. Sentiment?
# sentis = docs.apply(lambda x: sum([t.sentiment for t in x])) # doesn't work. needs training?
#return pd.DataFrame({'n_sents_spacy' : n_sents, 'n_sents_tstat' : n_sents_tstat})
if analyze == True:
# Store results.
stop_freq = listify(sts, feature_name='stopword_frequency')
punct_freq = listify(pnct, feature_name='punctuation_freq')
root_freq = listify(rts, feature_name='root_frequencies')
syn_results = {'stopword_freq': stop_freq,
'syn_punc_freq' : punct_freq,
'root_freq': root_freq,
'pos_freq' : list(pos_freq),
'pos_ngrams' : pgram_matrix}
return cv, syn_results
else:
syn_results_df = pd.DataFrame()
syn_results_df['stopword_freq'] = sts
syn_results_df['syn_punc_freq'] = pnct
syn_results_df['root_freq'] = rts
#syn_results_df['pos_freq'] = list(pos_freq)
#syn_results_df['pos_ngrams'] = pgram_matrix
return docs, cv, pgram_matrix, syn_results_df
# To call this on test data, pass the CountVectorizer returned from the training call (store it as 'train_cv' in model.py).
#%%
# Debugging
# test_df = train.iloc[:50,:]
# test = get_sl(test_df, train=True, analyze=True)
#%% ANCHOR testing get_syn
# extract_feats(test_df, analyze=True, train=True)
# NOTE when extracting in model.py, call twice instead of once.
#train.columns.get_loc('text_clean')
# test_df = train.iloc[:50, :] # versus list version: train_text[:20]
# test = get_syn(test_df)
# # val_test = get_lexical(train_text[:5])
#%%
#%%
# Extract document-level features (structural)
def get_dl(data, text_col='text_clean', analyze=True, docs=[]):
# 1. Number of sentences
if len(docs) <= 0:
print('Configuring spacy model for document level')
nlp = spacy.load('en_core_web_sm', disable=['lemmatizer', 'parser','tagger','ner'])
nlp.enable_pipe('senter') # this is the main diff between wl, sl and dl.
docs = data[text_col].swifter.apply(lambda x: nlp(x))
ns = docs.apply(lambda x: len([s for s in x.sents])) #en_web_sm is not as accurate as blank or textstat.
# ns = data[text_col].apply(
# lambda x: textstat.sentence_count(x))
# 2. Number of user mentions - absolute counts.
ms = data[text_col].str.count('@user', flags=re.I|re.M)
# Could be expanded to include hashtags and urls in the future here.
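# e.g. a hedged sketch (assumes hashtags appear as '#tag' and urls start with 'http' in text_clean):
# hs = data[text_col].str.count(r'#\w+', flags=re.M)
# urls = data[text_col].str.count(r'http\S+', flags=re.I|re.M)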
if analyze == True:
n_sents = listify(ns, feature_name = 'number_of_sentences')
ments = listify(ms, feature_name = 'number_of_mentions')
struc_results = {'n_sents': n_sents, 'n_mentions': ments} # before skiping listify.
#struc_results = {'n_sents' : ns, 'n_mentions' : ms}
return struc_results
else:
struc_results_df = pd.DataFrame()
struc_results_df['n_sents'] = ns #✅
struc_results_df['n_mentions'] = ms #✅
return struc_results_df
#%%
# Testing get_struc.
#test = get_dl(test_df, analyze=False)
#%%
# ANCHOR function to lookup and get specific [{features: x.x}] from extraction funct.
def feature_lookup(f_param_dict, extracted_features):
# Return the extracted feature lists whose names are requested anywhere in the feature parameter dict.
target_feats = list(itertools.chain.from_iterable(f_param_dict.values()))
return {name: feats for name, feats in extracted_features.items() if name in target_feats}
# also look into dpath, dict-toolbox2
#%%
# Test feature_lookup
# t = {'some_feature_cat1': ['feature_name1', 'feature_name2']}
# feature_lookup(t)
#%%
def conc_features(matrixList):
# Concatenate feature vectors
# pass a list or dict of matrices and do list/dict comprehension/unpacking?
#combined_features = hstack([feature_vector1, feature_vector2], 'csr')
combined_features = hstack(matrixList, 'csr')
return combined_features
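#%%
# Hedged usage sketch for conc_features; the names below come from the d_vectorize/extract_feats
# calls elsewhere in this file and are assumptions about the calling code in model.py:
# combined_train = conc_features([pgram_matrix, train_vecs])
# combined_test = conc_features([test_pgram_matrix, test_vecs])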
#%%
def d_vectorize(selected_feats, train=False, dv=None):
# Old approach: Vectorize all generated lists of dicts (stored in a dict or list?).
# if train == True:
# dv = DictVectorizer()
# #X = d.fit_transform(dictList)
# # Either store as list.
# dvList = []
# matList = []
# # Or in single dict
# #matDict = dict() using dv as a key just overwrites the value since they are all identical. Nesting the dict just complicates things even more...
# if train == True:
# # Iterate through feature lists of dictionaries (lexical, syntactic, structural)
# for feature_name, feat_list in selected_feats.items():
# #print(feature_name, feat_list)
# #return
# if feature_name == 'pos_ngrams': # Check for pos_ngrams (already vectorized)
# matList.append(feat_list) # if pos_ngrams feat matrix, just append it.
# #matDict[dv] = feat_list
# continue
# if train == True:
# feat_matrix = dv.fit_transform(feat_list)
# # NOTE storing each vectorizer
# dvList.append(dv)
# matList.append(feat_matrix)
# # This is the test case
# # The test case. transforming test data to fitted individual dvs.
# if train == False: #iterate through each dv and all the feature lists.
# feat_lists = []
# # this has to only fit once per feature dv-featurelist pair.
# for feature_name, feat_list in selected_feats.items():
# if feature_name == 'pos_ngrams':
# matList.append(feat_list)
# continue
# feat_lists.append(feat_list)
# #return(feat_lists)
# for dv, featList in list(zip(dvs, feat_lists)): # enable this to loop through both dvs and features.
# #print(dv, featList)
# feat_matrix = dv.transform(featList) # this needs to be passed its corresponding dv. if you store in zip/list, it should have the same, fixed order. but how to iterate?
# matList.append(feat_matrix)
# #matDict[dv] = feat_matrix
# # Is LIWC a separate case? Should be the same as engineered features.
# #return matDict#dv, matList #matDict.values() should be list of matrices equal to number of features. To be concatenated.
# return dvList, matList
# New approach - using dfs with selected features.
# 1. Get list of dicts, row-wise from selected features DF.
feats = selected_feats.to_dict('records')
if train == True:
dv = DictVectorizer()
feats_vecs = dv.fit_transform(feats)
return dv , feats_vecs
else:
feats_vecs = dv.transform(feats)
return dv, feats_vecs
#%%
####
# test_df = train.iloc[:50,:]
# sent_cv_train, extracted_train = extract_feats(test_df, text_col='text_clean', analyze=False, train=True, feature_pms=feature_params)
# sent_cv_test, extracted_test = extract_feats(val.iloc[:50,:], text_col='text_clean', analyze=False, train=False, cv=sent_cv_train, feature_pms=feature_params)
# train_dv, train_vecs = d_vectorize(train_selected_feats_df, train=True)
# test_dv, test_vecs = d_vectorize(test_selected_feats_df, train=False, dv=train_dv)
####
#test = d_vectorize(extracted_test, train=False, dvs=train_dvs)
# Then d_vectorize LIWC matches.
# Then concat all of the vectorized features.
# Then fit model!
#%%
def extract_feats(data, text_col='text_clean', feature_pms=dict(), analyze=False, cv=None, train=False):
# Data = dataframe - can be recast by child functions.
# See if resetting data index speeds up extraction.
data.reset_index(drop=True, inplace=True)
# lowercase all @USER mentions. An artifact from preprocessing.
data[text_col] = data[text_col].str.replace(
'@USER', '@user') # , inplace=True)
all_features_dict = dict()
all_features_df_list = []
selected_features = dict()
# 1. Call each of the extractor functions
# 1.3 Sentence-level # TODO moved up to pass docs to other extraction functs for speed.
print('Sentence level features')
if analyze == True:
docs = []
sent_cv, sent_lvl = get_sl(
data, text_col=text_col, cv=cv, analyze=analyze, train=train)
else:
docs, sent_cv, pgram_matrix, sent_lvl = get_sl(data, text_col=text_col, cv=cv, analyze=analyze, train=train)
# 1.1 Character-level (10 features)
print('Character level features')
char_lvl = get_cl(data, text_col=text_col, analyze=analyze)
# 1.2 Word-level
print('Word level features')
word_lvl = get_wl(data, text_col=text_col, analyze=analyze, docs=docs)
#sent_lvl = word_lvl.copy(deep=True)
#return sent_lvl
# if train == False:
# sent_cv, sent_lvl = get_sl(data, text_col=text_col, analyze=analyze)
# 1.4 Document-level
print('Document level features')
doc_lvl = get_dl(data, text_col=text_col, analyze=analyze, docs=docs)
#return doc_lvl
# Return all features if extracting for feature analysis. LIWC is analyzed separately.
if analyze == True:
# Store in dict
all_features_dict['character_level'] = char_lvl
all_features_dict['word_level'] = word_lvl
all_features_dict['sentence_level'] = sent_lvl # Maybe pop pgrams matrix into separate var/container?
all_features_dict['document_level'] = doc_lvl
return sent_cv, all_features_dict # pass sent_cv on to analyze_feats from here.
# Old approaches
# Option 1 - extracting flat list (of n instances) (of dicts with n features) to vectorize in one go.
# for feat_cat, feature_name in feature_pms['engineered'].items():
# if feat_cat in all_features.keys():
# selected_features[feat_cat] = all_features[feat_cat].values()
# return selected_features
# TODO how to make sure that all features align? Pandas? hstack before fitting?
# Option 2 - extract individual lists of [{'feature1' : feature_value}... {'feature2' : feature_value}] for each feature?
# Iterate through features to pass on, given parameters in parameter dict.
# Get a flat list of all desired target features.
#target_feats = list(itertools.chain.from_iterable([fn for fn in feature_pms['engineered'].values()]))
# Lookup and retrieve each feature from all_features and store in selected_features
# Works, but return that awkward df with individual dicts.
# for feat_level, feat_name in all_features.items():# outer level {'feature_level': 'feature_name': [{'feature' : feature_val}]}
# for fn, fl in feat_name.items():
# if fn in target_feats:
# selected_features[fn] = fl
# Return selected features
# 2. return selectively for classification
if analyze == False:
# Get a flat list of all desired target features.
target_feats = list(itertools.chain.from_iterable([fn for fn in feature_pms['engineered'].values()]))
#return char_lvl, word_lvl, sent_lvl, doc_lvl
# Concatenate feature dfs for each level horizontally.
#all_feats_df = pd.concat([char_lvl, word_lvl, sent_lvl, doc_lvl], axis=1, join='inner') # works.
all_feats_df_list = [char_lvl, word_lvl, sent_lvl, doc_lvl]
# Mitigating duplicate indices in dfs.
[df.reset_index(inplace=True, drop=True) for df in all_feats_df_list]
# 1.5 LIWC features
# parsed_liwc is called in the main namespace.
if feature_pms['liwc'] == True:
liwc_feats = pd.DataFrame.from_records(
liwc_match(parsed_liwc, data, extract=True))
#selected_features['liwc_counts'] = liwc_feats # store LIWC straight in selected_feats dict.
# index liwc_feats with data.index
liwc_feats.set_index(data.index, inplace=True)
all_feats_df_list.append(liwc_feats)
#return liwc_feats
#return sent_cv, all_features
# concat liwc features to df selected features.
# Concat all feature dfs.
#try:
all_feats_df = pd.concat(all_feats_df_list, axis=1, join='inner')
import os
import random
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer, TfidfTransformer
from sumeval.metrics.rouge import RougeCalculator
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from summa.summarizer import summarize
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.luhn import LuhnSummarizer
from sumy.summarizers.lex_rank import LexRankSummarizer
from sumy.summarizers.text_rank import TextRankSummarizer
from sumy.summarizers.lsa import LsaSummarizer
# We can use ratio as a hyperparameter to control the length of the summary
def textrank(file, ratio=0.02):
with open(file, 'r', errors='ignore') as f:
doc = f.read()
return summarize(doc, ratio=ratio)
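# Hedged usage sketch ('doc.txt' is a placeholder path, not a file from this project):
# print(textrank('doc.txt', ratio=0.05))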
def sumy_textrank(file, ratio=0.02):
with open(file, 'r', errors='ignore') as f:
doc = f.read().split('\n')
n = len(doc)
parser = PlaintextParser(doc, Tokenizer("english"))
summarizer = TextRankSummarizer()
summaryinit = summarizer(parser.document, round(ratio * n))
summary = ''
for i in summaryinit:
summary += str(i)
summary = summary.replace(',', '')
return summary
def centroid(file, binary=False, ratio=0.02):
with open(file, 'r', errors='ignore') as f:
doc = f.read().split('\n')
n = len(doc)
if binary:
tfidfv = TfidfVectorizer(stop_words=stopwords.words('english'), binary=True, use_idf=False, norm=False)
else:
tfidfv = TfidfVectorizer(stop_words=stopwords.words('english'))
tfidfmat = tfidfv.fit_transform(doc)
# features = tfidfv.get_feature_names()  # only needed by the commented-out centroiddf below; use get_feature_names_out() on newer scikit-learn
# centroiddf = pd.DataFrame(tfidfmat.todense(), index=doc, columns=features)
top_sents = np.concatenate((np.asarray(doc).reshape(-1, 1), tfidfmat.todense().sum(axis=1)), axis=1)
top_df = pd.DataFrame(top_sents, columns=['Sentence', 'TF-IDF-Score'])
top_df['TF-IDF-Score'] = pd.to_numeric(top_df['TF-IDF-Score'], errors='coerce')
top_df.sort_values(by='TF-IDF-Score', inplace=True, ascending=False)
top_df['Sentence'] = top_df['Sentence'].str.replace(',', '', regex=False)
return '\n'.join(top_df['Sentence'][:round(ratio * n)])
def lsa(file, ratio=0.02):
with open(file, 'r', errors='ignore') as f:
doc = f.read().split('\n')
n = len(doc)
parser = PlaintextParser(doc, Tokenizer("english"))
summarizer = LsaSummarizer()
summaryinit = summarizer(parser.document, round(ratio * n))
summary = ''
for i in summaryinit:
summary += str(i)
summary = summary.replace(',', '')
return summary
def lexrank(file, ratio=0.02):
with open(file, 'r', errors='ignore') as f:
doc = f.read().split('\n')
n = len(doc)
parser = PlaintextParser(doc, Tokenizer("english"))
summarizer = LexRankSummarizer()
summaryinit = summarizer(parser.document, round(ratio * n))
summary = ''
for i in summaryinit:
summary += str(i)
summary = summary.replace(',', '')
return summary
def luhn(file, ratio=0.02):
with open(file, 'r', errors='ignore') as f:
doc = f.read().split('\n')
n = len(doc)
parser = PlaintextParser(doc, Tokenizer("english"))
summarizer = LuhnSummarizer()
summaryinit = summarizer(parser.document, round(ratio * n))
summary = ''
for i in summaryinit:
summary += str(i)
summary = summary.replace(',', '')
return summary
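# Hedged sketch: the imported RougeCalculator could score any summarizer above against a
# reference summary; both file names here are placeholders, not project files.
# rouge = RougeCalculator(stopwords=True, lang="en")
# with open('reference_summary.txt', 'r', errors='ignore') as f:
#     reference = f.read()
# candidate = lexrank('doc.txt', ratio=0.05)
# print(rouge.rouge_n(summary=candidate, references=reference, n=1))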
def init_matrix(file):
with open(file, 'r', errors='ignore') as f:
doc = f.read()
vocabulary = list(set(word_tokenize(doc)))
vocab_matrix = pd.DataFrame(0, index=vocabulary, columns=vocabulary)
return vocab_matrix
def store_positions_info(file, maxlength):
with open(file, 'r', errors='ignore') as f:
doc = f.read()
vocabulary = list(set(word_tokenize(doc)))
infodict = pd.DataFrame(0, index=vocabulary, columns=range(1, maxlength + 1))
for sentence in sent_tokenize(doc):
wordlist = word_tokenize(sentence)
for idx, word in enumerate(wordlist, 1):
infodict[idx][word] += 1
return infodict
def make_edges(file, vocab_matrix):
with open(file, 'r', errors='ignore') as f:
doc = f.read()
for sentence in sent_tokenize(doc):
wordlist = word_tokenize(sentence)
for i in range(len(wordlist) - 1):
vocab_matrix[wordlist[i]][wordlist[i + 1]] += 1
return vocab_matrix
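# Hedged usage sketch for the co-occurrence graph ('doc.txt' is a placeholder path):
# vm = make_edges('doc.txt', init_matrix('doc.txt'))
# vm[w1][w2] then counts how often token w1 is immediately followed by token w2 in the document.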
def importance_scores(file, vocab_matrix, ends=False, mid=False):
sentence_scores = []
sentence_scores_mid = []
sentence_scores_ends = []
with open(file, 'r', errors='ignore') as f:
doc = f.read()
for sentence in sent_tokenize(doc):
score = 0
scoremid = 0
scoreends = 0
wordlist = word_tokenize(sentence)
sentlength = len(wordlist)
weights_middle = list(np.linspace(0, 1, sentlength // 2))
weights_middle.extend(list(np.linspace(1, 0, sentlength // 2)))
if sentlength % 2 == 0:
del weights_middle[sentlength // 2]
weights_ends = list(np.linspace(1, 0, sentlength // 2))
weights_ends.extend(list(np.linspace(0, 1, sentlength // 2)))
if sentlength % 2 == 0:
del weights_ends[sentlength // 2]
for idx in range(len(wordlist) - 1):
score += vocab_matrix[wordlist[idx]][wordlist[idx + 1]]
scoremid += vocab_matrix[wordlist[idx]][wordlist[idx + 1]] * weights_middle[idx]
scoreends += vocab_matrix[wordlist[idx]][wordlist[idx + 1]] * weights_ends[idx]
score /= len(wordlist)
scoremid /= len(wordlist)
scoreends /= len(wordlist)
sentence_scores.append([sentence, score])
sentence_scores_mid.append([sentence, scoremid])
sentence_scores_ends.append([sentence, scoreends])
imp_scores = pd.DataFrame(sentence_scores, columns=['Sentence', 'Importance-Score'])